CombinedText stringlengths 4 3.42M |
|---|
#!/usr/bin/env python
from __future__ import print_function
import fileinput
import glob
import os
import shutil
import sys
### Begin compatibility block for pre-v2.6: ###
#
# ignore_patterns and copytree funtions are copies of what is included
# in shutil.copytree of python v2.6 and later.
#
### When compatibility is no-longer needed, this block
### can be replaced with:
###
### from shutil import ignore_patterns, copytree
###
### or the "shutil." qualifier can be prepended to the function
### names where they are used.
try:
WindowsError
except NameError:
WindowsError = None
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
import fnmatch
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
from shutil import copy2, Error, copystat
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise Error(errors)
### End compatibility block for pre-v2.6 ###
def copy_if_out_of_date(original, derived):
if (not os.path.exists(derived) or
os.stat(derived).st_mtime < os.stat(original).st_mtime):
try:
shutil.copyfile(original, derived)
except IOError:
if os.path.basename(original) == 'matplotlibrc':
msg = "'%s' not found. " % original + \
"Did you run `python setup.py build`?"
raise IOError(msg)
else:
raise
def check_build():
build_dirs = ['../build', '../build/doctrees', '../build/html', '../build/latex',
'../build/texinfo', '_static', '_templates', 'users']
for d in build_dirs:
try:
os.mkdir(d)
except OSError:
pass
def doctest():
os.system('sphinx-build -b doctest -d ../build/doctrees . ../build/doctest')
def linkcheck():
os.system('sphinx-build -b linkcheck -d ../build/doctrees . ../build/linkcheck')
def html():
check_build()
#copy_if_out_of_date('../lib/matplotlib/mpl-data/matplotlibrc', '_static/matplotlibrc')
if small_docs:
options = "-D plot_formats=\"[('png', 80)]\""
else:
options = ''
if os.system('sphinx-build %s -b html -d ../build/doctrees . ../build/html' % options):
raise SystemExit("Building HTML failed.")
figures_dest_path = '../build/html/pyplots'
if os.path.exists(figures_dest_path):
shutil.rmtree(figures_dest_path)
copytree(
'pyplots', figures_dest_path,
ignore=ignore_patterns("*.pyc"))
# Clean out PDF files from the _images directory
for filename in glob.glob('../build/html/_images/*.pdf'):
os.remove(filename)
shutil.copy('../../CHANGELOG', '../build/html/_static/CHANGELOG')
def latex():
check_build()
#figs()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -b latex -d build/doctrees . build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('../build/latex')
# Call the makefile produced by sphinx...
if os.system('make'):
raise SystemExit("Rendering LaTeX failed.")
os.chdir('../..')
else:
print('latex build has not been tested on windows')
def texinfo():
check_build()
#figs()
if sys.platform != 'win32':
# Texinfo format.
if os.system(
'sphinx-build -b texinfo -d ../build/doctrees . ../build/texinfo'):
raise SystemExit("Building Texinfo failed.")
# Produce info file.
os.chdir('../build/texinfo')
# Call the makefile produced by sphinx...
if os.system('make'):
raise SystemExit("Rendering Texinfo failed.")
os.chdir('../..')
else:
print('texinfo build has not been tested on windows')
def clean():
shutil.rmtree("../build", ignore_errors=True)
shutil.rmtree("examples", ignore_errors=True)
for pattern in ['scot_examples/api/*.png',
'scot_examples/pylab_examples/*.png',
'scot_examples/pylab_examples/*.pdf',
'scot_examples/units/*.png',
'pyplots/tex_demo.png',
'_static/matplotlibrc',
'_templates/gallery.html',
'users/installing.rst']:
for filename in glob.glob(pattern):
if os.path.exists(filename):
os.remove(filename)
def all():
#figs()
html()
latex()
funcd = {
'html' : html,
'latex' : latex,
'texinfo' : texinfo,
'clean' : clean,
'all' : all,
'doctest' : doctest,
'linkcheck': linkcheck,
}
small_docs = False
# Change directory to the one containing this file
current_dir = os.getcwd()
filedir = os.path.dirname(os.path.join(current_dir, __file__))
os.chdir(os.path.join(filedir, 'source'))
check_build()
copy_if_out_of_date('../../INSTALL', 'users/installing.rst')
# Create the examples symlink, if it doesn't exist
required_symlinks = [
('scot_examples', '../../examples/'),
]
for link, target in required_symlinks:
if not os.path.exists(link):
if hasattr(os, 'symlink'):
os.symlink(target, link)
else:
shutil.copytree(os.path.join(link, '..', target), link)
if len(sys.argv)>1:
if '--small' in sys.argv[1:]:
small_docs = True
sys.argv.remove('--small')
for arg in sys.argv[1:]:
func = funcd.get(arg)
if func is None:
raise SystemExit('Do not know how to handle %s; valid args are %s'%(
arg, funcd.keys()))
func()
else:
small_docs = False
all()
os.chdir(current_dir)
Fixed symlink creation on windows
#!/usr/bin/env python
from __future__ import print_function
import fileinput
import glob
import os
import shutil
import sys
### Begin compatibility block for pre-v2.6: ###
#
# ignore_patterns and copytree funtions are copies of what is included
# in shutil.copytree of python v2.6 and later.
#
### When compatibility is no-longer needed, this block
### can be replaced with:
###
### from shutil import ignore_patterns, copytree
###
### or the "shutil." qualifier can be prepended to the function
### names where they are used.
try:
WindowsError
except NameError:
WindowsError = None
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
import fnmatch
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
from shutil import copy2, Error, copystat
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise Error(errors)
### End compatibility block for pre-v2.6 ###
def copy_if_out_of_date(original, derived):
if (not os.path.exists(derived) or
os.stat(derived).st_mtime < os.stat(original).st_mtime):
try:
shutil.copyfile(original, derived)
except IOError:
if os.path.basename(original) == 'matplotlibrc':
msg = "'%s' not found. " % original + \
"Did you run `python setup.py build`?"
raise IOError(msg)
else:
raise
def check_build():
build_dirs = ['../build', '../build/doctrees', '../build/html', '../build/latex',
'../build/texinfo', '_static', '_templates', 'users']
for d in build_dirs:
try:
os.mkdir(d)
except OSError:
pass
def doctest():
os.system('sphinx-build -b doctest -d ../build/doctrees . ../build/doctest')
def linkcheck():
os.system('sphinx-build -b linkcheck -d ../build/doctrees . ../build/linkcheck')
def html():
check_build()
#copy_if_out_of_date('../lib/matplotlib/mpl-data/matplotlibrc', '_static/matplotlibrc')
if small_docs:
options = "-D plot_formats=\"[('png', 80)]\""
else:
options = ''
if os.system('sphinx-build %s -b html -d ../build/doctrees . ../build/html' % options):
raise SystemExit("Building HTML failed.")
figures_dest_path = '../build/html/pyplots'
if os.path.exists(figures_dest_path):
shutil.rmtree(figures_dest_path)
copytree(
'pyplots', figures_dest_path,
ignore=ignore_patterns("*.pyc"))
# Clean out PDF files from the _images directory
for filename in glob.glob('../build/html/_images/*.pdf'):
os.remove(filename)
shutil.copy('../../CHANGELOG', '../build/html/_static/CHANGELOG')
def latex():
check_build()
#figs()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -b latex -d build/doctrees . build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('../build/latex')
# Call the makefile produced by sphinx...
if os.system('make'):
raise SystemExit("Rendering LaTeX failed.")
os.chdir('../..')
else:
print('latex build has not been tested on windows')
def texinfo():
check_build()
#figs()
if sys.platform != 'win32':
# Texinfo format.
if os.system(
'sphinx-build -b texinfo -d ../build/doctrees . ../build/texinfo'):
raise SystemExit("Building Texinfo failed.")
# Produce info file.
os.chdir('../build/texinfo')
# Call the makefile produced by sphinx...
if os.system('make'):
raise SystemExit("Rendering Texinfo failed.")
os.chdir('../..')
else:
print('texinfo build has not been tested on windows')
def clean():
shutil.rmtree("../build", ignore_errors=True)
shutil.rmtree("examples", ignore_errors=True)
for pattern in ['scot_examples/api/*.png',
'scot_examples/pylab_examples/*.png',
'scot_examples/pylab_examples/*.pdf',
'scot_examples/units/*.png',
'pyplots/tex_demo.png',
'_static/matplotlibrc',
'_templates/gallery.html',
'users/installing.rst']:
for filename in glob.glob(pattern):
if os.path.exists(filename):
os.remove(filename)
def all():
#figs()
html()
latex()
funcd = {
'html' : html,
'latex' : latex,
'texinfo' : texinfo,
'clean' : clean,
'all' : all,
'doctest' : doctest,
'linkcheck': linkcheck,
}
small_docs = False
# Change directory to the one containing this file
current_dir = os.getcwd()
filedir = os.path.dirname(os.path.join(current_dir, __file__))
os.chdir(os.path.join(filedir, 'source'))
check_build()
copy_if_out_of_date('../../INSTALL', 'users/installing.rst')
# Create the examples symlink, if it doesn't exist
required_symlinks = [
('scot_examples', '../../examples/'),
]
for link, target in required_symlinks:
if not os.path.exists(link):
try:
os.symlink(target, link)
except OSError:
shutil.copytree(os.path.join(link, '..', target), link)
if len(sys.argv)>1:
if '--small' in sys.argv[1:]:
small_docs = True
sys.argv.remove('--small')
for arg in sys.argv[1:]:
func = funcd.get(arg)
if func is None:
raise SystemExit('Do not know how to handle %s; valid args are %s'%(
arg, funcd.keys()))
func()
else:
small_docs = False
all()
os.chdir(current_dir)
|
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import ContractForm
from .forms import ContractSettingsForm
from .forms import EstimateForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NewsletterForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ProposalForm
from .forms import ReportForm
from .forms import SettingsForm as AppSettingsForm
from .forms import ServiceForm
from .forms import TaskForm
from .forms import TimeForm
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import Invoice
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import Settings as AppSettings
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import add_user_to_contacts
from .utils import get_index_items
from .utils import get_page_items
from .utils import create_and_send_mail
from .utils import edit
from .utils import generate_doc
from .utils import get_client_city
from .utils import get_company_name
from .utils import get_setting
from .utils import get_template_and_url_names
from .utils import get_query
from .utils import send_mail
from datetime import datetime
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import F, Sum
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from io import BytesIO
from matplotlib.dates import DateFormatter
from matplotlib.dates import MonthLocator
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Client.objects.filter(published=True).order_by('name')
serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Service.objects.filter(active=True).order_by('name')
serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Profile.objects.filter(
published=True).order_by('user__first_name')
serializer_class = ProfileSerializer
@staff_member_required
def client(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
contact_model=Contact,
contract_model=Contract,
model=Client,
pk=pk,
project_model=Project)
return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'client', page_type='edit')
return edit(
request,
ClientForm,
Client,
url_name,
template_name,
active_nav='client',
pk=pk)
@staff_member_required
def client_index(request):
search_fields = ('address', 'name')
context = get_index_items(
request,
Client,
search_fields,
active_nav='client',
app_settings_model=AppSettings,
edit_url='client_edit', # Delete modal
order_by=('-active', '-updated', 'name'),
show_search=True)
return render(request, 'client_index.html', context)
@staff_member_required
def company_edit(request, pk=None):
return edit(
request,
CompanyForm,
Company,
'company',
'company_edit.html',
active_nav='dropdown',
pk=1)
@staff_member_required
def company(request):
context = {}
company = Company.get_solo()
services = company.service_set.all().order_by('-updated')
context['active_nav'] = 'dropdown'
context['active_tab'] = 'company'
context['company'] = company
context['notes'] = company.note.all()
context['services'] = services
return render(request, 'company.html', context)
@staff_member_required
def contact(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
context['active_nav'] = 'contact'
context['edit_url'] = 'contact_edit' # Delete modal
context['item'] = contact
return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'contact', page_type='edit')
return edit(
request,
ContactForm,
Contact,
url_name,
template_name,
active_nav='contact',
pk=pk)
@staff_member_required
def contact_index(request):
search_fields = ('first_name', 'last_name', 'email', 'notes')
context = get_index_items(
request,
Contact,
search_fields,
active_nav='contact',
app_settings_model=AppSettings,
edit_url='contact_edit', # Delete modal
order_by=('-updated', '-active', 'first_name'),
show_search=True)
return render(request, 'contact_index.html', context)
@staff_member_required
def contact_mail(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
if request.method == 'POST' and create_and_send_mail(
request, Log, mail_form=MailForm, contact=contact, pk=pk):
return HttpResponseRedirect(reverse('contact', kwargs={'pk': pk}))
else:
form = MailForm()
context['active_nav'] = 'contact'
context['contact'] = contact
context['form'] = form
return render(request, 'contact_mail.html', context)
def contact_unsubscribe(request, pk=None):
contact = get_object_or_404(Contact, pk=pk)
uuid = request.GET.get('id')
if uuid == contact.uuid:
contact.subscribed = False
contact.save()
messages.add_message(request, messages.SUCCESS,
'You have been unsubscribed!')
log = Log(entry='%s unsubscribed.' % contact.email)
log.save()
return HttpResponseRedirect(reverse('home'))
else:
messages.add_message(request, messages.WARNING, 'Nothing to see here.')
return HttpResponseRedirect(reverse('home'))
@staff_member_required
def contract(request, pk=None):
"""
"""
context = get_page_items(
request, company_model=Company, model=Contract, pk=pk, time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
filename = get_company_name(company)
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_contract.html', context=context, file_object=response)
if context['doc']:
# https://stackoverflow.com/a/24122313/185820
document = generate_doc(contract)
filename = get_company_name(company)
f = BytesIO()
document.save(f)
length = f.tell()
f.seek(0)
content_type = 'application/vnd.openxmlformats-'
content_type += 'officedocument.wordprocessingml.document'
response = HttpResponse(f.getvalue(), content_type=content_type)
response['Content-Disposition'] = 'filename=%s.docx' % filename
response['Content-Length'] = length
return response
else:
return render(request, 'contract.html', context)
@staff_member_required
def contract_edit(request, pk=None):
"""
"""
template_name, url_name = get_template_and_url_names(
'contract', page_type='edit')
return edit(
request,
ContractForm,
Contract,
url_name,
template_name,
active_nav='contract',
pk=pk)
@staff_member_required
def contract_index(request):
"""
"""
search_fields = ()
context = get_index_items(
request,
Contract,
search_fields,
active_nav='contract',
app_settings_model=AppSettings,
order_by=('-created', ))
return render(request, 'contract_index.html', context)
@staff_member_required
def contract_settings(request):
context = {}
fields = {}
contract_settings = ContractSettings.get_solo()
for field in contract_settings._meta.fields:
if field.description == 'Text' and field.name != 'body':
fields[field.name] = {}
fields[field.name]['name'] = field.verbose_name
fields[field.name]['value'] = getattr(contract_settings,
field.name)
context['fields'] = fields
context['active_tab'] = 'contract'
context['active_nav'] = 'dropdown'
return render(request, 'contract_settings.html', context)
@staff_member_required
def contract_settings_edit(request, pk=None):
return edit(
request,
ContractSettingsForm,
ContractSettings,
'contract_settings',
'contract_settings_edit.html',
pk=1,
active_nav='dropdown')
@staff_member_required
def estimate(request, pk=None):
context = get_page_items(
request, company_model=Company, model=Estimate, pk=pk, time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
filename = '-'.join(['estimate', pk])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'estimate', page_type='edit')
return edit(
request,
EstimateForm,
Estimate,
url_name,
template_name,
active_nav='estimate',
company_model=Company,
pk=pk)
@staff_member_required
def estimate_index(request):
search_fields = ('subject', )
context = get_index_items(
request,
Estimate,
search_fields,
active_nav='estimate',
app_settings_model=AppSettings,
edit_url='estimate_edit', # Delete modal
order_by=('-issue_date', ),
show_search=True)
context['company'] = company
return render(request, 'estimate_index.html', context)
@staff_member_required
def estimate_mail(request, pk=None):
estimate = get_object_or_404(Estimate, pk=pk)
if create_and_send_mail(
request, Log, estimate=estimate, profile_model=Profile):
return HttpResponseRedirect(reverse('estimate', kwargs={'pk': pk}))
def home(request):
context = get_page_items(
request,
app_settings_model=AppSettings,
invoice_model=Invoice,
note_model=Note,
project_model=Project,
report_model=Report)
return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
context = get_page_items(
request, Invoice, company_model=Company, pk=pk, time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
filename = '_'.join(['invoice', pk])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'invoice', page_type='edit')
return edit(
request,
InvoiceForm,
Invoice,
url_name,
template_name,
active_nav='invoice',
company_model=Company,
pk=pk, )
@staff_member_required
def invoice_index(request):
search_fields = (
'client__name',
'document_id',
'issue_date',
'project__name',
'subject', )
context = get_index_items(
request,
Invoice,
search_fields,
active_nav='invoice',
app_settings_model=AppSettings,
edit_url='invoice_edit', # Delete modal
order_by=('-issue_date', ),
show_search=True)
return render(request, 'invoice_index.html', context)
def login(request):
context = {}
context['login'] = True
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
# https://stackoverflow.com/a/39316967/185820
auth_login(request, user)
city_data = get_client_city(request)
log = Log(entry='%s logged in from %s' % (user, city_data))
log.save()
return HttpResponseRedirect(reverse('home'))
else:
messages.add_message(request, messages.WARNING, 'Login failed.')
return HttpResponseRedirect(reverse('home'))
return render(request, 'login.html', context)
@staff_member_required
def log_index(request):
search_fields = ('entry', )
context = get_index_items(
request,
Log,
search_fields,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-created', ), )
return render(request, 'log_index.html', context)
@staff_member_required
def newsletter(request, pk=None):
"""
"""
context = {}
newsletter = get_object_or_404(Newsletter, pk=pk)
context['active_nav'] = 'dropdown'
context['contacts'] = newsletter.contacts.all().order_by('first_name')
context['edit_url'] = 'newsletter_edit'
context['item'] = newsletter
return render(request, 'newsletter.html', context)
@staff_member_required
def newsletter_edit(request, pk=None):
"""
"""
template_name, url_name = get_template_and_url_names(
'newsletter', page_type='edit')
return edit(
request,
NewsletterForm,
Newsletter,
url_name,
template_name,
active_nav='dropdown',
pk=pk)
@staff_member_required
def newsletter_index(request, pk=None):
"""
"""
search_fields = ('text', )
context = get_index_items(
request,
Newsletter,
search_fields,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-created', ))
return render(request, 'newsletter_index.html', context)
@staff_member_required
def newsletter_send(request, pk=None):
"""
"""
context = {}
newsletter = get_object_or_404(Newsletter, pk=pk)
contacts = newsletter.contacts.all().order_by('first_name')
for contact in contacts:
url = reverse('contact_unsubscribe', kwargs={'pk': contact.pk})
url = ''.join([request.get_host(), url])
to = contact.email
first_name = contact.first_name
subject = newsletter.subject
message = newsletter.text
if send_mail(
request,
subject,
message,
to,
url=url,
uuid=contact.uuid,
first_name=first_name):
log = Log(entry='Mail sent to %s.' % to)
log.save()
messages.add_message(request, messages.SUCCESS, 'Batch mail sent!')
context['active_nav'] = 'newsletter'
context['contacts'] = contacts
context['edit_url'] = 'newsletter_edit'
context['item'] = newsletter
return render(request, 'newsletter.html', context)
@staff_member_required
def note(request, pk=None):
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
note = get_object_or_404(Note, pk=pk)
notes = Note.objects.filter(note=note)
notes = notes.order_by('-pk')
context['active_nav'] = 'note'
context['edit_url'] = 'note_edit'
context['item'] = note
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=note-%s.pdf' % pk
return generate_pdf(
'pdf_note.html', context=context, file_object=response)
else:
return render(request, 'note.html', context)
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def note_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'note', page_type='edit')
return edit(
request,
NoteForm,
Note,
url_name,
template_name,
active_nav='note',
company_model=Company,
pk=pk)
@staff_member_required
def note_index(request, pk=None):
search_fields = ('note', )
filters = {'hidden': False, }
context = get_index_items(
request,
Note,
search_fields,
active_nav='note',
app_settings_model=AppSettings,
filters=filters,
order_by=('-active', '-updated', 'note', 'due_date', 'priority'),
show_search=True)
context['edit_url'] = 'note_edit' # Delete modal
return render(request, 'note_index.html', context)
@staff_member_required
def project(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
model=Project,
estimate_model=Estimate,
invoice_model=Invoice,
time_model=Time,
pk=pk)
return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'project', page_type='edit')
return edit(
request,
ProjectForm,
Project,
url_name,
template_name,
active_nav='project',
pk=pk)
@staff_member_required
def project_index(request, pk=None):
search_fields = ('id', 'name')
context = get_index_items(
request,
Project,
search_fields,
active_nav='project',
app_settings_model=AppSettings,
edit_url='project_edit', # Delete modal
order_by=(
'-updated',
'-active', ),
show_search=True)
return render(request, 'project_index.html', context)
@staff_member_required
def proposal(request, pk=None):
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
proposal = get_object_or_404(Proposal, pk=pk)
context['active_nav'] = 'dropdown'
context['edit_url'] = 'proposal_edit'
context['item'] = proposal
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=proposal-%s.pdf' % pk
return generate_pdf(
'pdf_proposal.html', context=context, file_object=response)
else:
return render(request, 'proposal.html', context)
def proposal_edit(request, pk=None):
"""
"""
template_name, url_name = get_template_and_url_names(
'proposal', page_type='edit')
return edit(
request,
ProposalForm,
Proposal,
url_name,
template_name,
active_nav='dropdown',
company_model=Company,
pk=pk)
@staff_member_required
def proposal_index(request, pk=None):
search_fields = ()
context = get_index_items(
request,
Proposal,
search_fields,
active_nav='dropdown',
app_settings_model=AppSettings,
show_search=True)
context['edit_url'] = 'proposal_edit' # Delete modal
return render(request, 'proposal_index.html', context)
@staff_member_required
def report(request, pk=None):
company = Company.get_solo()
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
report = get_object_or_404(Report, pk=pk)
reports = Report.objects.filter(active=True)
reports = reports.aggregate(gross=Sum(F('gross')), net=Sum(F('net')))
context['active_nav'] = 'dropdown'
context['company'] = company
context['cost'] = report.gross - report.net
context['edit_url'] = 'report_edit' # Delete modal
context['item'] = report
context['reports'] = reports
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=report-%s.pdf' % pk
return generate_pdf(
'pdf_report.html', context=context, file_object=response)
else:
return render(request, 'report.html', context)
@staff_member_required
def report_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'report', page_type='edit')
return edit(
request,
ReportForm,
Report,
url_name,
template_name,
active_nav='dropdown',
invoice_model=Invoice,
pk=pk)
@staff_member_required
def report_index(request):
show_plot = False
reports = Report.objects.filter(active=True)
plot_items = reports # Save for plotting
reports = reports.aggregate(gross=Sum(F('gross')), net=Sum(F('net')))
company = Company.get_solo()
search_fields = ('id', 'name', 'gross', 'net')
context = get_index_items(
request,
Report,
search_fields,
active_nav='dropdown',
app_settings_model=AppSettings,
edit_url='report_edit', # Delete modal
order_by=('-date', ),
show_search=True)
if reports['gross'] is not None and reports['net'] is not None:
cost = reports['gross'] - reports['net']
else:
reports['gross'] = 0
reports['net'] = 0
cost = 0
if 'items' in context:
if len(context['items']) > 1:
show_plot = True
context['reports'] = reports
context['company'] = company
context['cost'] = cost
context['show_plot'] = show_plot
context['plot_items'] = plot_items
return render(request, 'report_index.html', context)
def report_plot(request): # http://stackoverflow.com/a/5515994/185820
"""
"""
values = get_query(request, 'values')
# http://matplotlib.org/examples/api/date_demo.html
x = [date2num(datetime.strptime(i[1], '%Y-%m-%d')) for i in values]
y = [i[0] for i in values]
figure = Figure()
canvas = FigureCanvasAgg(figure)
axes = figure.add_subplot(1, 1, 1)
axes.grid(True)
axes.plot(x, y)
axes.xaxis.set_major_locator(MonthLocator())
axes.xaxis.set_major_formatter(DateFormatter('%m'))
# write image data to a string buffer and get the PNG image bytes
buf = BytesIO()
canvas.print_png(buf)
data = buf.getvalue()
# write image bytes back to the browser
return HttpResponse(data, content_type="image/png")
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def service_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'service', page_type='edit')
return edit(
request,
ServiceForm,
Service,
url_name,
template_name,
active_nav='dropdown',
company_model=Company,
pk=pk)
@staff_member_required
def settings(request):
context = {}
context['settings'] = settings
context['active_tab'] = 'system'
context['active_nav'] = 'dropdown'
return render(request, 'settings.html', context)
@staff_member_required
def settings_edit(request, pk=None):
return edit(
request,
AppSettingsForm,
AppSettings,
'settings',
'settings_edit.html',
active_nav='dropdown',
pk=1)
@staff_member_required
def task(request, pk=None):
context = {}
task = get_object_or_404(Task, pk=pk)
context['active_nav'] = 'task'
context['edit_url'] = 'task_edit' # Delete modal
context['item'] = task
return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'task', page_type='edit')
return edit(
request,
TaskForm,
Task,
url_name,
'task_edit.html',
active_nav='task',
pk=pk)
@staff_member_required
def task_index(request):
search_fields = ('name', )
context = get_index_items(
request,
Task,
search_fields,
active_nav='task',
app_settings_model=AppSettings,
edit_url='task_edit', # Delete modal
order_by=('-updated', '-active'),
show_search=True)
return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
context = {}
entry = get_object_or_404(Time, pk=pk)
if not entry.user and not request.user.is_staff:
return HttpResponseRedirect(reverse('login'))
if entry.user:
if (not entry.user.username == request.user.username and
not request.user.is_staff):
return HttpResponseRedirect(reverse('login'))
context['active_nav'] = 'time'
context['edit_url'] = 'time_edit' # Delete modal
context['item'] = entry
return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'time', page_type='edit')
return edit(
request,
TimeForm,
Time,
url_name,
template_name,
active_nav='time',
invoice_model=Invoice,
estimate_model=Estimate,
project_model=Project,
task_model=Task,
time_model=Time,
pk=pk, )
@login_required
def time_index(request):
search_fields = ('client__name', 'date', 'log', 'pk', 'project__name',
'invoice__document_id', 'user__username')
context = get_index_items(
request,
Time,
search_fields,
active_nav='time',
app_settings_model=AppSettings,
edit_url='time_edit', # Delete modal
# page_size=3,
order_by=('-date', ),
show_search=True)
if not request.user.is_staff:
return HttpResponseRedirect(reverse('login'))
else:
return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
contacts = Contact.objects.all()
user = get_object_or_404(User, pk=pk)
profile = Profile.objects.get_or_create(user=user)[0]
filters = {
'estimate': None,
'user': user,
}
search_fields = ()
context = get_index_items(
request,
Time,
search_fields,
active_nav='user',
app_settings_model=AppSettings,
order_by=('-date', ),
filters=filters, )
total_hours = context['total_hours']
if profile.rate and total_hours:
total_dollars = profile.rate * total_hours
else:
total_dollars = 0
context['active_nav'] = 'dropdown'
context['company'] = company
context['edit_url'] = 'user_edit' # Delete modal
context['icon_size'] = get_setting(request, AppSettings, 'icon_size')
context['item'] = user
context['profile'] = profile
context['request'] = request
context['total_dollars'] = '%.2f' % total_dollars
context['is_contact'] = user.email in [i.email for i in contacts]
projects = Project.objects.filter(team__in=[user, ]).order_by('-updated')
context['projects'] = projects
if request.user.pk == int(pk) or request.user.is_staff:
return render(request, 'user.html', context)
else:
return HttpResponseRedirect(reverse('home'))
@staff_member_required
def user_contact(request, pk=None):
return add_user_to_contacts(request, Contact, pk=pk)
@login_required
def user_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'user', page_type='edit')
return edit(
request,
ProfileForm,
Profile,
url_name,
template_name,
active_nav='dropdown',
pk=pk)
@staff_member_required
def user_index(request):
search_fields = ()
context = get_index_items(
request,
User,
search_fields,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-profile__active', '-profile__updated'),
show_search=False)
context['company'] = company
# Check if user is contact
contacts = Contact.objects.all()
items = context['items']
for item in items:
if item.email in [i.email for i in contacts]:
item.is_contact = True
else:
item.is_contact = False
context['items'] = items
return render(request, 'user_index.html', context)
Update
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import ContractForm
from .forms import ContractSettingsForm
from .forms import EstimateForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NewsletterForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ProposalForm
from .forms import ReportForm
from .forms import SettingsForm as AppSettingsForm
from .forms import ServiceForm
from .forms import TaskForm
from .forms import TimeForm
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import Invoice
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import Settings as AppSettings
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import add_user_to_contacts
from .utils import get_index_items
from .utils import get_page_items
from .utils import create_and_send_mail
from .utils import edit
from .utils import generate_doc
from .utils import get_client_city
from .utils import get_company_name
from .utils import get_setting
from .utils import get_template_and_url_names
from .utils import get_query
from .utils import send_mail
from datetime import datetime
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import F, Sum
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from io import BytesIO
from matplotlib.dates import DateFormatter
from matplotlib.dates import MonthLocator
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Client.objects.filter(published=True).order_by('name')
serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Service.objects.filter(active=True).order_by('name')
serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Profile.objects.filter(
published=True).order_by('user__first_name')
serializer_class = ProfileSerializer
@staff_member_required
def client(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
contact_model=Contact,
contract_model=Contract,
model=Client,
pk=pk,
project_model=Project)
return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'client', page_type='edit')
return edit(
request,
ClientForm,
Client,
url_name,
template_name,
active_nav='client',
pk=pk)
@staff_member_required
def client_index(request):
search_fields = ('address', 'name')
context = get_index_items(
request,
Client,
search_fields,
active_nav='client',
app_settings_model=AppSettings,
edit_url='client_edit', # Delete modal
order_by=('-updated', '-active', 'name'),
show_search=True)
return render(request, 'client_index.html', context)
@staff_member_required
def company_edit(request, pk=None):
return edit(
request,
CompanyForm,
Company,
'company',
'company_edit.html',
active_nav='dropdown',
pk=1)
@staff_member_required
def company(request):
context = {}
company = Company.get_solo()
services = company.service_set.all().order_by('-updated')
context['active_nav'] = 'dropdown'
context['active_tab'] = 'company'
context['company'] = company
context['notes'] = company.note.all()
context['services'] = services
return render(request, 'company.html', context)
@staff_member_required
def contact(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
context['active_nav'] = 'contact'
context['edit_url'] = 'contact_edit' # Delete modal
context['item'] = contact
return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'contact', page_type='edit')
return edit(
request,
ContactForm,
Contact,
url_name,
template_name,
active_nav='contact',
pk=pk)
@staff_member_required
def contact_index(request):
search_fields = ('first_name', 'last_name', 'email', 'notes')
context = get_index_items(
request,
Contact,
search_fields,
active_nav='contact',
app_settings_model=AppSettings,
edit_url='contact_edit', # Delete modal
order_by=('-updated', '-active', 'first_name'),
show_search=True)
return render(request, 'contact_index.html', context)
@staff_member_required
def contact_mail(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
if request.method == 'POST' and create_and_send_mail(
request, Log, mail_form=MailForm, contact=contact, pk=pk):
return HttpResponseRedirect(reverse('contact', kwargs={'pk': pk}))
else:
form = MailForm()
context['active_nav'] = 'contact'
context['contact'] = contact
context['form'] = form
return render(request, 'contact_mail.html', context)
def contact_unsubscribe(request, pk=None):
contact = get_object_or_404(Contact, pk=pk)
uuid = request.GET.get('id')
if uuid == contact.uuid:
contact.subscribed = False
contact.save()
messages.add_message(request, messages.SUCCESS,
'You have been unsubscribed!')
log = Log(entry='%s unsubscribed.' % contact.email)
log.save()
return HttpResponseRedirect(reverse('home'))
else:
messages.add_message(request, messages.WARNING, 'Nothing to see here.')
return HttpResponseRedirect(reverse('home'))
@staff_member_required
def contract(request, pk=None):
"""
"""
context = get_page_items(
request, company_model=Company, model=Contract, pk=pk, time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
filename = get_company_name(company)
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_contract.html', context=context, file_object=response)
if context['doc']:
# https://stackoverflow.com/a/24122313/185820
document = generate_doc(contract)
filename = get_company_name(company)
f = BytesIO()
document.save(f)
length = f.tell()
f.seek(0)
content_type = 'application/vnd.openxmlformats-'
content_type += 'officedocument.wordprocessingml.document'
response = HttpResponse(f.getvalue(), content_type=content_type)
response['Content-Disposition'] = 'filename=%s.docx' % filename
response['Content-Length'] = length
return response
else:
return render(request, 'contract.html', context)
@staff_member_required
def contract_edit(request, pk=None):
"""
"""
template_name, url_name = get_template_and_url_names(
'contract', page_type='edit')
return edit(
request,
ContractForm,
Contract,
url_name,
template_name,
active_nav='contract',
pk=pk)
@staff_member_required
def contract_index(request):
"""
"""
search_fields = ()
context = get_index_items(
request,
Contract,
search_fields,
active_nav='contract',
app_settings_model=AppSettings,
order_by=('-created', ))
return render(request, 'contract_index.html', context)
@staff_member_required
def contract_settings(request):
context = {}
fields = {}
contract_settings = ContractSettings.get_solo()
for field in contract_settings._meta.fields:
if field.description == 'Text' and field.name != 'body':
fields[field.name] = {}
fields[field.name]['name'] = field.verbose_name
fields[field.name]['value'] = getattr(contract_settings,
field.name)
context['fields'] = fields
context['active_tab'] = 'contract'
context['active_nav'] = 'dropdown'
return render(request, 'contract_settings.html', context)
@staff_member_required
def contract_settings_edit(request, pk=None):
return edit(
request,
ContractSettingsForm,
ContractSettings,
'contract_settings',
'contract_settings_edit.html',
pk=1,
active_nav='dropdown')
@staff_member_required
def estimate(request, pk=None):
context = get_page_items(
request, company_model=Company, model=Estimate, pk=pk, time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
filename = '-'.join(['estimate', pk])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'estimate', page_type='edit')
return edit(
request,
EstimateForm,
Estimate,
url_name,
template_name,
active_nav='estimate',
company_model=Company,
pk=pk)
@staff_member_required
def estimate_index(request):
search_fields = ('subject', )
context = get_index_items(
request,
Estimate,
search_fields,
active_nav='estimate',
app_settings_model=AppSettings,
edit_url='estimate_edit', # Delete modal
order_by=('-issue_date', ),
show_search=True)
context['company'] = company
return render(request, 'estimate_index.html', context)
@staff_member_required
def estimate_mail(request, pk=None):
estimate = get_object_or_404(Estimate, pk=pk)
if create_and_send_mail(
request, Log, estimate=estimate, profile_model=Profile):
return HttpResponseRedirect(reverse('estimate', kwargs={'pk': pk}))
def home(request):
context = get_page_items(
request,
app_settings_model=AppSettings,
invoice_model=Invoice,
note_model=Note,
project_model=Project,
report_model=Report)
return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
context = get_page_items(
request, Invoice, company_model=Company, pk=pk, time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
filename = '_'.join(['invoice', pk])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'invoice', page_type='edit')
return edit(
request,
InvoiceForm,
Invoice,
url_name,
template_name,
active_nav='invoice',
company_model=Company,
pk=pk, )
@staff_member_required
def invoice_index(request):
search_fields = (
'client__name',
'document_id',
'issue_date',
'project__name',
'subject', )
context = get_index_items(
request,
Invoice,
search_fields,
active_nav='invoice',
app_settings_model=AppSettings,
edit_url='invoice_edit', # Delete modal
order_by=('-issue_date', ),
show_search=True)
return render(request, 'invoice_index.html', context)
def login(request):
context = {}
context['login'] = True
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
# https://stackoverflow.com/a/39316967/185820
auth_login(request, user)
city_data = get_client_city(request)
log = Log(entry='%s logged in from %s' % (user, city_data))
log.save()
return HttpResponseRedirect(reverse('home'))
else:
messages.add_message(request, messages.WARNING, 'Login failed.')
return HttpResponseRedirect(reverse('home'))
return render(request, 'login.html', context)
@staff_member_required
def log_index(request):
search_fields = ('entry', )
context = get_index_items(
request,
Log,
search_fields,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-created', ), )
return render(request, 'log_index.html', context)
@staff_member_required
def newsletter(request, pk=None):
"""
"""
context = {}
newsletter = get_object_or_404(Newsletter, pk=pk)
context['active_nav'] = 'dropdown'
context['contacts'] = newsletter.contacts.all().order_by('first_name')
context['edit_url'] = 'newsletter_edit'
context['item'] = newsletter
return render(request, 'newsletter.html', context)
@staff_member_required
def newsletter_edit(request, pk=None):
"""
"""
template_name, url_name = get_template_and_url_names(
'newsletter', page_type='edit')
return edit(
request,
NewsletterForm,
Newsletter,
url_name,
template_name,
active_nav='dropdown',
pk=pk)
@staff_member_required
def newsletter_index(request, pk=None):
"""
"""
search_fields = ('text', )
context = get_index_items(
request,
Newsletter,
search_fields,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-created', ))
return render(request, 'newsletter_index.html', context)
@staff_member_required
def newsletter_send(request, pk=None):
"""
"""
context = {}
newsletter = get_object_or_404(Newsletter, pk=pk)
contacts = newsletter.contacts.all().order_by('first_name')
for contact in contacts:
url = reverse('contact_unsubscribe', kwargs={'pk': contact.pk})
url = ''.join([request.get_host(), url])
to = contact.email
first_name = contact.first_name
subject = newsletter.subject
message = newsletter.text
if send_mail(
request,
subject,
message,
to,
url=url,
uuid=contact.uuid,
first_name=first_name):
log = Log(entry='Mail sent to %s.' % to)
log.save()
messages.add_message(request, messages.SUCCESS, 'Batch mail sent!')
context['active_nav'] = 'newsletter'
context['contacts'] = contacts
context['edit_url'] = 'newsletter_edit'
context['item'] = newsletter
return render(request, 'newsletter.html', context)
@staff_member_required
def note(request, pk=None):
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
note = get_object_or_404(Note, pk=pk)
notes = Note.objects.filter(note=note)
notes = notes.order_by('-pk')
context['active_nav'] = 'note'
context['edit_url'] = 'note_edit'
context['item'] = note
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=note-%s.pdf' % pk
return generate_pdf(
'pdf_note.html', context=context, file_object=response)
else:
return render(request, 'note.html', context)
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def note_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'note', page_type='edit')
return edit(
request,
NoteForm,
Note,
url_name,
template_name,
active_nav='note',
company_model=Company,
pk=pk)
@staff_member_required
def note_index(request, pk=None):
search_fields = ('note', )
filters = {'hidden': False, }
context = get_index_items(
request,
Note,
search_fields,
active_nav='note',
app_settings_model=AppSettings,
filters=filters,
order_by=('-active', '-updated', 'note', 'due_date', 'priority'),
show_search=True)
context['edit_url'] = 'note_edit' # Delete modal
return render(request, 'note_index.html', context)
@staff_member_required
def project(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
model=Project,
estimate_model=Estimate,
invoice_model=Invoice,
time_model=Time,
pk=pk)
return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'project', page_type='edit')
return edit(
request,
ProjectForm,
Project,
url_name,
template_name,
active_nav='project',
pk=pk)
@staff_member_required
def project_index(request, pk=None):
search_fields = ('id', 'name')
context = get_index_items(
request,
Project,
search_fields,
active_nav='project',
app_settings_model=AppSettings,
edit_url='project_edit', # Delete modal
order_by=(
'-updated',
'-active', ),
show_search=True)
return render(request, 'project_index.html', context)
@staff_member_required
def proposal(request, pk=None):
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
proposal = get_object_or_404(Proposal, pk=pk)
context['active_nav'] = 'dropdown'
context['edit_url'] = 'proposal_edit'
context['item'] = proposal
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=proposal-%s.pdf' % pk
return generate_pdf(
'pdf_proposal.html', context=context, file_object=response)
else:
return render(request, 'proposal.html', context)
def proposal_edit(request, pk=None):
"""
"""
template_name, url_name = get_template_and_url_names(
'proposal', page_type='edit')
return edit(
request,
ProposalForm,
Proposal,
url_name,
template_name,
active_nav='dropdown',
company_model=Company,
pk=pk)
@staff_member_required
def proposal_index(request, pk=None):
search_fields = ()
context = get_index_items(
request,
Proposal,
search_fields,
active_nav='dropdown',
app_settings_model=AppSettings,
show_search=True)
context['edit_url'] = 'proposal_edit' # Delete modal
return render(request, 'proposal_index.html', context)
@staff_member_required
def report(request, pk=None):
company = Company.get_solo()
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
report = get_object_or_404(Report, pk=pk)
reports = Report.objects.filter(active=True)
reports = reports.aggregate(gross=Sum(F('gross')), net=Sum(F('net')))
context['active_nav'] = 'dropdown'
context['company'] = company
context['cost'] = report.gross - report.net
context['edit_url'] = 'report_edit' # Delete modal
context['item'] = report
context['reports'] = reports
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=report-%s.pdf' % pk
return generate_pdf(
'pdf_report.html', context=context, file_object=response)
else:
return render(request, 'report.html', context)
@staff_member_required
def report_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'report', page_type='edit')
return edit(
request,
ReportForm,
Report,
url_name,
template_name,
active_nav='dropdown',
invoice_model=Invoice,
pk=pk)
@staff_member_required
def report_index(request):
show_plot = False
reports = Report.objects.filter(active=True)
plot_items = reports # Save for plotting
reports = reports.aggregate(gross=Sum(F('gross')), net=Sum(F('net')))
company = Company.get_solo()
search_fields = ('id', 'name', 'gross', 'net')
context = get_index_items(
request,
Report,
search_fields,
active_nav='dropdown',
app_settings_model=AppSettings,
edit_url='report_edit', # Delete modal
order_by=('-date', ),
show_search=True)
if reports['gross'] is not None and reports['net'] is not None:
cost = reports['gross'] - reports['net']
else:
reports['gross'] = 0
reports['net'] = 0
cost = 0
if 'items' in context:
if len(context['items']) > 1:
show_plot = True
context['reports'] = reports
context['company'] = company
context['cost'] = cost
context['show_plot'] = show_plot
context['plot_items'] = plot_items
return render(request, 'report_index.html', context)
def report_plot(request): # http://stackoverflow.com/a/5515994/185820
"""
"""
values = get_query(request, 'values')
# http://matplotlib.org/examples/api/date_demo.html
x = [date2num(datetime.strptime(i[1], '%Y-%m-%d')) for i in values]
y = [i[0] for i in values]
figure = Figure()
canvas = FigureCanvasAgg(figure)
axes = figure.add_subplot(1, 1, 1)
axes.grid(True)
axes.plot(x, y)
axes.xaxis.set_major_locator(MonthLocator())
axes.xaxis.set_major_formatter(DateFormatter('%m'))
# write image data to a string buffer and get the PNG image bytes
buf = BytesIO()
canvas.print_png(buf)
data = buf.getvalue()
# write image bytes back to the browser
return HttpResponse(data, content_type="image/png")
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def service_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'service', page_type='edit')
return edit(
request,
ServiceForm,
Service,
url_name,
template_name,
active_nav='dropdown',
company_model=Company,
pk=pk)
@staff_member_required
def settings(request):
context = {}
context['settings'] = settings
context['active_tab'] = 'system'
context['active_nav'] = 'dropdown'
return render(request, 'settings.html', context)
@staff_member_required
def settings_edit(request, pk=None):
return edit(
request,
AppSettingsForm,
AppSettings,
'settings',
'settings_edit.html',
active_nav='dropdown',
pk=1)
@staff_member_required
def task(request, pk=None):
context = {}
task = get_object_or_404(Task, pk=pk)
context['active_nav'] = 'task'
context['edit_url'] = 'task_edit' # Delete modal
context['item'] = task
return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'task', page_type='edit')
return edit(
request,
TaskForm,
Task,
url_name,
'task_edit.html',
active_nav='task',
pk=pk)
@staff_member_required
def task_index(request):
search_fields = ('name', )
context = get_index_items(
request,
Task,
search_fields,
active_nav='task',
app_settings_model=AppSettings,
edit_url='task_edit', # Delete modal
order_by=('-updated', '-active'),
show_search=True)
return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
context = {}
entry = get_object_or_404(Time, pk=pk)
if not entry.user and not request.user.is_staff:
return HttpResponseRedirect(reverse('login'))
if entry.user:
if (not entry.user.username == request.user.username and
not request.user.is_staff):
return HttpResponseRedirect(reverse('login'))
context['active_nav'] = 'time'
context['edit_url'] = 'time_edit' # Delete modal
context['item'] = entry
return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'time', page_type='edit')
return edit(
request,
TimeForm,
Time,
url_name,
template_name,
active_nav='time',
invoice_model=Invoice,
estimate_model=Estimate,
project_model=Project,
task_model=Task,
time_model=Time,
pk=pk, )
@login_required
def time_index(request):
search_fields = ('client__name', 'date', 'log', 'pk', 'project__name',
'invoice__document_id', 'user__username')
context = get_index_items(
request,
Time,
search_fields,
active_nav='time',
app_settings_model=AppSettings,
edit_url='time_edit', # Delete modal
# page_size=3,
order_by=('-date', ),
show_search=True)
if not request.user.is_staff:
return HttpResponseRedirect(reverse('login'))
else:
return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
contacts = Contact.objects.all()
user = get_object_or_404(User, pk=pk)
profile = Profile.objects.get_or_create(user=user)[0]
filters = {
'estimate': None,
'user': user,
}
search_fields = ()
context = get_index_items(
request,
Time,
search_fields,
active_nav='user',
app_settings_model=AppSettings,
order_by=('-date', ),
filters=filters, )
total_hours = context['total_hours']
if profile.rate and total_hours:
total_dollars = profile.rate * total_hours
else:
total_dollars = 0
context['active_nav'] = 'dropdown'
context['company'] = company
context['edit_url'] = 'user_edit' # Delete modal
context['icon_size'] = get_setting(request, AppSettings, 'icon_size')
context['item'] = user
context['profile'] = profile
context['request'] = request
context['total_dollars'] = '%.2f' % total_dollars
context['is_contact'] = user.email in [i.email for i in contacts]
projects = Project.objects.filter(team__in=[user, ]).order_by('-updated')
context['projects'] = projects
if request.user.pk == int(pk) or request.user.is_staff:
return render(request, 'user.html', context)
else:
return HttpResponseRedirect(reverse('home'))
@staff_member_required
def user_contact(request, pk=None):
return add_user_to_contacts(request, Contact, pk=pk)
@login_required
def user_edit(request, pk=None):
template_name, url_name = get_template_and_url_names(
'user', page_type='edit')
return edit(
request,
ProfileForm,
Profile,
url_name,
template_name,
active_nav='dropdown',
pk=pk)
@staff_member_required
def user_index(request):
search_fields = ()
context = get_index_items(
request,
User,
search_fields,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-profile__active', '-profile__updated'),
show_search=False)
context['company'] = company
# Check if user is contact
contacts = Contact.objects.all()
items = context['items']
for item in items:
if item.email in [i.email for i in contacts]:
item.is_contact = True
else:
item.is_contact = False
context['items'] = items
return render(request, 'user_index.html', context)
|
from .forms import AdminProfileForm
from .forms import AdminTimeForm
from .forms import AppSettingsForm
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import ContractForm
from .forms import ContractSettingsForm
from .forms import EstimateForm
from .forms import FileForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NewsletterForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ProposalForm
from .forms import ReportForm
from .forms import ServiceForm
from .forms import TaskForm
from .forms import TimeForm
from .models import AppSettings
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import File
from .models import Invoice
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import edit
from .utils import generate_doc
from .utils import get_client_city
from .utils import get_company_name
from .utils import get_index_items
from .utils import get_page_items
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from io import BytesIO
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Client.objects.filter(published=True).order_by('name')
serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Service.objects.filter(active=True).order_by('name')
serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Profile.objects.filter(
published=True).order_by('user__first_name')
serializer_class = ProfileSerializer
@staff_member_required
def client(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
contact_model=Contact,
contract_model=Contract,
model=Client,
pk=pk,
project_model=Project)
return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
return edit(
request,
form_model=ClientForm,
model=Client,
active_nav='client',
pk=pk)
@staff_member_required
def client_index(request):
context = get_index_items(
request,
Client,
active_nav='client',
app_settings_model=AppSettings,
edit_url='client_edit',
order_by=('-active', '-updated', 'name'),
search_fields=('address', 'name'),
show_search=True)
return render(request, 'client_index.html', context)
@staff_member_required
def contact(request, pk=None):
context = get_page_items(
request, app_settings_model=AppSettings, model=Contact, pk=pk)
return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
return edit(
request,
form_model=ContactForm,
model=Contact,
active_nav='contact',
client_model=Client,
user_model=User,
pk=pk)
@staff_member_required
def contact_index(request):
context = get_index_items(
request,
Contact,
active_nav='contact',
app_settings_model=AppSettings,
edit_url='contact_edit',
order_by=('-active', 'first_name'),
search_fields=('first_name', 'last_name', 'email', 'notes', 'pk'),
show_search=True)
return render(request, 'contact_index.html', context)
@staff_member_required
def contract(request, pk=None):
"""
"""
company = Company.get_solo()
context = get_page_items(
request, company_model=Company, model=Contract, pk=pk, time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
filename = get_company_name(company)
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_contract.html', context=context, file_object=response)
if context['doc']:
# https://stackoverflow.com/a/24122313/185820
document = generate_doc(contract)
filename = get_company_name(company)
f = BytesIO()
document.save(f)
length = f.tell()
f.seek(0)
content_type = 'application/vnd.openxmlformats-'
content_type += 'officedocument.wordprocessingml.document'
response = HttpResponse(f.getvalue(), content_type=content_type)
response['Content-Disposition'] = 'filename=%s.docx' % filename
response['Content-Length'] = length
return response
else:
return render(request, 'contract.html', context)
@staff_member_required
def contract_edit(request, pk=None):
"""
"""
return edit(
request,
form_model=ContractForm,
model=Contract,
active_nav='contract',
pk=pk)
@staff_member_required
def contract_index(request):
"""
"""
context = get_index_items(
request,
Contract,
active_nav='contract',
order_by=('-updated', ),
app_settings_model=AppSettings)
return render(request, 'contract_index.html', context)
@staff_member_required
def estimate(request, pk=None):
order_by = {'time': ('date', ), }
context = get_page_items(
request,
app_settings_model=AppSettings,
company_model=Company,
model=Estimate,
order_by=order_by,
pk=pk,
time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
filename = '-'.join(['estimate', pk])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
return edit(
request,
form_model=EstimateForm,
model=Estimate,
active_nav='estimate',
company_model=Company,
project_model=Project,
pk=pk)
@staff_member_required
def estimate_index(request):
company = Company.get_solo()
context = get_index_items(
request,
Estimate,
active_nav='estimate',
app_settings_model=AppSettings,
edit_url='estimate_edit',
order_by=('-issue_date', ),
search_fields=('subject', ),
show_search=True)
context['company'] = company
return render(request, 'estimate_index.html', context)
@staff_member_required
def file_view(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
company_model=Company,
model=File,
pk=pk)
return render(request, 'file.html', context)
@staff_member_required
def file_edit(request, pk=None):
return edit(
request,
form_model=FileForm,
model=File,
active_nav='dropdown',
company_model=Company,
pk=pk, )
@staff_member_required
def file_index(request):
context = get_index_items(
request,
File,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-updated', ))
return render(request, 'file_index.html', context)
def home(request):
context = get_page_items(
request,
app_settings_model=AppSettings,
company_model=Company,
columns_visible={
'note': {
'due': 'false',
'hidden': 'false'
},
'invoice': {
'paid': 'false',
},
},
invoice_model=Invoice,
note_model=Note,
order_by={
'note': ('-updated', ),
'project': ('-updated', ),
'time': ('-updated', ),
},
project_model=Project,
time_model=Time,
report_model=Report)
return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
company_model=Company,
model=Invoice,
order_by={'time': ('date', )}, # For time entries
pk=pk,
time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
company_name = get_company_name(context['company'])
model_name = context['model_name'].upper()
doc_id = context['item'].document_id or pk
filename = '_'.join([company_name, model_name, str(doc_id)])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, pk=None):
return edit(
request,
form_model=InvoiceForm,
model=Invoice,
active_nav='invoice',
company_model=Company,
project_model=Project,
pk=pk, )
@staff_member_required
def invoice_index(request):
search_fields = (
'client__name',
'document_id',
'issue_date',
'project__name',
'subject', )
context = get_index_items(
request,
Invoice,
active_nav='invoice',
app_settings_model=AppSettings,
edit_url='invoice_edit',
order_by=('-updated', ),
search_fields=search_fields,
show_search=True)
return render(request, 'invoice_index.html', context)
def login(request):
context = {}
context['login'] = True
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
# https://stackoverflow.com/a/39316967/185820
auth_login(request, user)
city_data = get_client_city(request)
log = Log(entry='%s logged in from %s' % (user, city_data))
log.save()
Profile.objects.get_or_create(user=user)
return HttpResponseRedirect(reverse('home'))
else:
messages.add_message(request, messages.WARNING, 'Login failed.')
return HttpResponseRedirect(reverse('home'))
return render(request, 'login.html', context)
@staff_member_required
def log_index(request):
context = get_index_items(
request,
Log,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-updated', ),
search_fields=('entry', ))
return render(request, 'log_index.html', context)
@staff_member_required(login_url='login')
def mail(request):
"""
"""
return edit(
request,
contact_model=Contact,
form_model=MailForm,
note_model=Note,
page_type='edit')
@staff_member_required
def newsletter(request, pk=None):
"""
"""
context = get_page_items(
request, app_settings_model=AppSettings, model=Newsletter, pk=pk)
return render(request, 'newsletter.html', context)
@staff_member_required
def newsletter_edit(request, pk=None):
"""
"""
return edit(
request,
form_model=NewsletterForm,
model=Newsletter,
active_nav='dropdown',
pk=pk)
@staff_member_required
def newsletter_index(request, pk=None):
"""
"""
context = get_index_items(
request,
Newsletter,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-updated', ),
search_fields=('text', ))
return render(request, 'newsletter_index.html', context)
@staff_member_required
def note(request, pk=None):
context = get_page_items(
request, app_settings_model=AppSettings, model=Note, pk=pk)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=note-%s.pdf' % pk
return generate_pdf(
'pdf_note.html', context=context, file_object=response)
else:
return render(request, 'note.html', context)
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def note_edit(request, pk=None):
return edit(
request,
form_model=NoteForm,
model=Note,
active_nav='note',
app_settings_model=AppSettings,
client_model=Client,
company_model=Company,
pk=pk)
@staff_member_required
def note_index(request, pk=None):
context = get_index_items(
request,
Note,
active_nav='note',
app_settings_model=AppSettings,
order_by=('-active', '-updated'),
search_fields=('note', 'title'),
show_search=True)
context['edit_url'] = 'note_edit'
return render(request, 'note_index.html', context)
@login_required
def profile_edit(request, pk=None):
if request.user.is_staff:
profile_form = AdminProfileForm
else:
profile_form = ProfileForm
return edit(
request,
form_model=profile_form,
model=Profile,
active_nav='dropdown',
pk=pk)
@staff_member_required
def project(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
model=Project,
contact_model=Contact,
estimate_model=Estimate,
invoice_model=Invoice,
user_model=User,
order_by={'time': ('date', )}, # For time entries
time_model=Time,
pk=pk)
return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
return edit(
request,
form_model=ProjectForm,
model=Project,
client_model=Client,
active_nav='project',
pk=pk)
@staff_member_required
def project_index(request, pk=None):
context = get_index_items(
request,
Project,
active_nav='project',
app_settings_model=AppSettings,
columns_visible={'project': {
'notes': 'true',
}, },
edit_url='project_edit',
order_by=(
'-active',
'-updated', ),
search_fields=('id', 'name'),
show_search=True)
return render(request, 'project_index.html', context)
@staff_member_required
def proposal(request, pk=None):
context = get_page_items(
request, company_model=Company, model=Proposal, pk=pk)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=proposal-%s.pdf' % pk
return generate_pdf(
'pdf_proposal.html', context=context, file_object=response)
else:
return render(request, 'proposal.html', context)
@staff_member_required
def proposal_edit(request, pk=None):
"""
"""
return edit(
request,
form_model=ProposalForm,
model=Proposal,
active_nav='proposal',
company_model=Company,
pk=pk)
@staff_member_required
def proposal_index(request, pk=None):
context = get_index_items(
request,
Proposal,
active_nav='proposal',
app_settings_model=AppSettings,
order_by=('-updated', ),
show_search=True)
context['edit_url'] = 'proposal_edit'
return render(request, 'proposal_index.html', context)
@staff_member_required
def report(request, pk=None):
context = get_page_items(
request, model=Report, app_settings_model=AppSettings, pk=pk)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=report-%s.pdf' % pk
return generate_pdf(
'pdf_report.html', context=context, file_object=response)
else:
return render(request, 'report.html', context)
@staff_member_required
def report_edit(request, pk=None):
return edit(
request,
form_model=ReportForm,
model=Report,
active_nav='dropdown',
invoice_model=Invoice,
pk=pk)
@staff_member_required
def report_index(request):
context = get_index_items(
request,
Report,
active_nav='dropdown',
app_settings_model=AppSettings,
edit_url='report_edit',
order_by=('-updated', '-active'),
search_fields=('id', 'name', 'gross', 'net'),
show_search=True)
return render(request, 'report_index.html', context)
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def service_edit(request, pk=None):
return edit(
request,
form_model=ServiceForm,
model=Service,
active_nav='dropdown',
company_model=Company,
pk=pk)
@staff_member_required
def settings_app(request):
context = get_page_items(
request, model=AppSettings, app_settings_model=AppSettings)
return render(request, 'settings.html', context)
@staff_member_required
def settings_app_edit(request, pk=None):
return edit(
request,
form_model=AppSettingsForm,
model=AppSettings,
active_nav='dropdown',
pk=1)
@staff_member_required
def settings_company_edit(request, pk=None):
return edit(
request,
form_model=CompanyForm,
model=Company,
active_nav='dropdown',
pk=1)
@staff_member_required
def settings_company(request):
context = get_page_items(
request, app_settings_model=AppSettings, model=Company)
return render(request, 'company.html', context)
@staff_member_required
def settings_contract(request):
context = get_page_items(
request, model=ContractSettings, app_settings_model=AppSettings)
return render(request, 'contract_settings.html', context)
@staff_member_required
def settings_contract_edit(request, pk=None):
return edit(
request,
form_model=ContractSettingsForm,
model=ContractSettings,
pk=1,
active_nav='dropdown')
@staff_member_required
def task(request, pk=None):
context = get_page_items(
request, model=Task, app_settings_model=AppSettings, pk=pk)
return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
return edit(
request, form_model=TaskForm, model=Task, active_nav='task', pk=pk)
@staff_member_required
def task_index(request):
context = get_index_items(
request,
Task,
active_nav='task',
app_settings_model=AppSettings,
edit_url='task_edit',
order_by=('-updated', ),
search_fields=('name', ),
show_search=True)
return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
"""
Authenticate users can only see their own time entries unless
they are staff members.
"""
message = 'Sorry, you are not allowed to view that time entry.'
time_entry = get_object_or_404(Time, pk=pk)
# No user
if not time_entry.user and not request.user.is_staff:
messages.add_message(request, messages.WARNING, message)
return HttpResponseRedirect(reverse('home'))
# Time entry user does not match current user
elif (not time_entry.user.username == request.user.username and
not request.user.is_staff):
messages.add_message(request, messages.WARNING, message)
return HttpResponseRedirect(reverse('home'))
else:
context = get_page_items(
request,
app_settings_model=AppSettings,
model=Time,
profile_model=Profile,
pk=pk)
return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
if request.user.is_staff:
time_form = AdminTimeForm
else:
time_form = TimeForm
return edit(
request,
form_model=time_form,
model=Time,
active_nav='time',
invoice_model=Invoice,
estimate_model=Estimate,
project_model=Project,
task_model=Task,
time_model=Time,
pk=pk, )
@login_required
def time_index(request):
search_fields = ('client__name', 'date', 'log', 'pk', 'project__name',
'invoice__document_id', 'user__username')
context = get_index_items(
request,
Time,
active_nav='time',
app_settings_model=AppSettings,
columns_visible={
'time': {
'invoiced': 'true',
'invoice': 'true',
'estimate': 'true',
},
},
edit_url='time_edit',
order_by=('-updated', ),
search_fields=search_fields,
show_search=True)
if not request.user.is_staff:
return HttpResponseRedirect(reverse('login'))
else:
return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
if not request.user.pk == pk:
message = 'Sorry, you are not allowed to view that user.'
messages.add_message(request, messages.WARNING, message)
return HttpResponseRedirect(reverse('home'))
else:
order_by = {
'time': ('-updated', ),
'project': ('-updated', ),
}
context = get_page_items(
request,
app_settings_model=AppSettings,
contact_model=Contact,
model=User,
order_by=order_by,
profile_model=Profile,
project_model=Project,
time_model=Time,
pk=pk)
return render(request, 'user.html', context)
@staff_member_required
def user_index(request):
context = get_index_items(
request,
User,
active_nav='dropdown',
app_settings_model=AppSettings,
company_model=Company,
contact_model=Contact,
order_by=('-profile__active', '-profile__updated'),
show_search=False)
return render(request, 'user_index.html', context)
@staff_member_required
def plot(request):
"""
"""
Update
from .forms import AdminProfileForm
from .forms import AdminTimeForm
from .forms import AppSettingsForm
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import ContractForm
from .forms import ContractSettingsForm
from .forms import EstimateForm
from .forms import FileForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NewsletterForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ProposalForm
from .forms import ReportForm
from .forms import ServiceForm
from .forms import TaskForm
from .forms import TimeForm
from .models import AppSettings
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import File
from .models import Invoice
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import edit
from .utils import generate_doc
from .utils import get_client_city
from .utils import get_company_name
from .utils import get_index_items
from .utils import get_page_items
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from io import BytesIO
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Client.objects.filter(published=True).order_by('name')
serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Service.objects.filter(active=True).order_by('name')
serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Profile.objects.filter(
published=True).order_by('user__first_name')
serializer_class = ProfileSerializer
@staff_member_required
def client(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
contact_model=Contact,
contract_model=Contract,
model=Client,
pk=pk,
project_model=Project)
return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
return edit(
request,
form_model=ClientForm,
model=Client,
active_nav='client',
pk=pk)
@staff_member_required
def client_index(request):
context = get_index_items(
request,
Client,
active_nav='client',
app_settings_model=AppSettings,
edit_url='client_edit',
order_by=('-active', '-updated', 'name'),
search_fields=('address', 'name'),
show_search=True)
return render(request, 'client_index.html', context)
@staff_member_required
def contact(request, pk=None):
context = get_page_items(
request, app_settings_model=AppSettings, model=Contact, pk=pk)
return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
return edit(
request,
form_model=ContactForm,
model=Contact,
active_nav='contact',
client_model=Client,
user_model=User,
pk=pk)
@staff_member_required
def contact_index(request):
context = get_index_items(
request,
Contact,
active_nav='contact',
app_settings_model=AppSettings,
edit_url='contact_edit',
order_by=('-active', 'first_name'),
search_fields=('first_name', 'last_name', 'email', 'notes', 'pk'),
show_search=True)
return render(request, 'contact_index.html', context)
@staff_member_required
def contract(request, pk=None):
"""
"""
company = Company.get_solo()
context = get_page_items(
request, company_model=Company, model=Contract, pk=pk, time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
filename = get_company_name(company)
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_contract.html', context=context, file_object=response)
if context['doc']:
# https://stackoverflow.com/a/24122313/185820
document = generate_doc(contract)
filename = get_company_name(company)
f = BytesIO()
document.save(f)
length = f.tell()
f.seek(0)
content_type = 'application/vnd.openxmlformats-'
content_type += 'officedocument.wordprocessingml.document'
response = HttpResponse(f.getvalue(), content_type=content_type)
response['Content-Disposition'] = 'filename=%s.docx' % filename
response['Content-Length'] = length
return response
else:
return render(request, 'contract.html', context)
@staff_member_required
def contract_edit(request, pk=None):
"""
"""
return edit(
request,
form_model=ContractForm,
model=Contract,
active_nav='contract',
pk=pk)
@staff_member_required
def contract_index(request):
"""
"""
context = get_index_items(
request,
Contract,
active_nav='contract',
order_by=('-updated', ),
app_settings_model=AppSettings)
return render(request, 'contract_index.html', context)
@staff_member_required
def estimate(request, pk=None):
order_by = {'time': ('date', ), }
context = get_page_items(
request,
app_settings_model=AppSettings,
company_model=Company,
model=Estimate,
order_by=order_by,
pk=pk,
time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
filename = '-'.join(['estimate', pk])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
return edit(
request,
form_model=EstimateForm,
model=Estimate,
active_nav='estimate',
company_model=Company,
project_model=Project,
pk=pk)
@staff_member_required
def estimate_index(request):
company = Company.get_solo()
context = get_index_items(
request,
Estimate,
active_nav='estimate',
app_settings_model=AppSettings,
edit_url='estimate_edit',
order_by=('-issue_date', ),
search_fields=('subject', ),
show_search=True)
context['company'] = company
return render(request, 'estimate_index.html', context)
@staff_member_required
def file_view(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
company_model=Company,
model=File,
pk=pk)
return render(request, 'file.html', context)
@staff_member_required
def file_edit(request, pk=None):
return edit(
request,
form_model=FileForm,
model=File,
active_nav='dropdown',
company_model=Company,
pk=pk, )
@staff_member_required
def file_index(request):
context = get_index_items(
request,
File,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-updated', ))
return render(request, 'file_index.html', context)
def home(request):
context = get_page_items(
request,
app_settings_model=AppSettings,
company_model=Company,
columns_visible={
'note': {
'due': 'false',
'hidden': 'false'
},
'invoice': {
'paid': 'false',
},
},
invoice_model=Invoice,
note_model=Note,
order_by={
'note': ('-updated', ),
'project': ('-updated', ),
'time': ('-updated', ),
},
project_model=Project,
time_model=Time,
report_model=Report)
return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
company_model=Company,
model=Invoice,
order_by={'time': ('date', )}, # For time entries
pk=pk,
time_model=Time)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
company_name = get_company_name(context['company'])
model_name = context['model_name'].upper()
doc_id = context['item'].document_id or pk
filename = '_'.join([company_name, model_name, str(doc_id)])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, pk=None):
return edit(
request,
form_model=InvoiceForm,
model=Invoice,
active_nav='invoice',
company_model=Company,
project_model=Project,
pk=pk, )
@staff_member_required
def invoice_index(request):
search_fields = (
'client__name',
'document_id',
'issue_date',
'project__name',
'subject', )
context = get_index_items(
request,
Invoice,
active_nav='invoice',
app_settings_model=AppSettings,
edit_url='invoice_edit',
order_by=('-updated', ),
search_fields=search_fields,
show_search=True)
return render(request, 'invoice_index.html', context)
def login(request):
context = {}
context['login'] = True
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
# https://stackoverflow.com/a/39316967/185820
auth_login(request, user)
city_data = get_client_city(request)
log = Log(entry='%s logged in from %s' % (user, city_data))
log.save()
Profile.objects.get_or_create(user=user)
return HttpResponseRedirect(reverse('home'))
else:
messages.add_message(request, messages.WARNING, 'Login failed.')
return HttpResponseRedirect(reverse('home'))
return render(request, 'login.html', context)
@staff_member_required
def log_index(request):
context = get_index_items(
request,
Log,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-updated', ),
search_fields=('entry', ))
return render(request, 'log_index.html', context)
@staff_member_required(login_url='login')
def mail(request):
"""
"""
return edit(
request,
contact_model=Contact,
form_model=MailForm,
note_model=Note,
page_type='edit')
@staff_member_required
def newsletter(request, pk=None):
"""
"""
context = get_page_items(
request, app_settings_model=AppSettings, model=Newsletter, pk=pk)
return render(request, 'newsletter.html', context)
@staff_member_required
def newsletter_edit(request, pk=None):
"""
"""
return edit(
request,
form_model=NewsletterForm,
model=Newsletter,
active_nav='dropdown',
pk=pk)
@staff_member_required
def newsletter_index(request, pk=None):
"""
"""
context = get_index_items(
request,
Newsletter,
active_nav='dropdown',
app_settings_model=AppSettings,
order_by=('-updated', ),
search_fields=('text', ))
return render(request, 'newsletter_index.html', context)
@staff_member_required
def note(request, pk=None):
context = get_page_items(
request, app_settings_model=AppSettings, model=Note, pk=pk)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=note-%s.pdf' % pk
return generate_pdf(
'pdf_note.html', context=context, file_object=response)
else:
return render(request, 'note.html', context)
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def note_edit(request, pk=None):
return edit(
request,
form_model=NoteForm,
model=Note,
active_nav='note',
app_settings_model=AppSettings,
client_model=Client,
company_model=Company,
pk=pk)
@staff_member_required
def note_index(request, pk=None):
context = get_index_items(
request,
Note,
active_nav='note',
app_settings_model=AppSettings,
order_by=('-active', '-updated'),
search_fields=('note', 'title'),
show_search=True)
context['edit_url'] = 'note_edit'
return render(request, 'note_index.html', context)
@login_required
def profile_edit(request, pk=None):
if request.user.is_staff:
profile_form = AdminProfileForm
else:
profile_form = ProfileForm
return edit(
request,
form_model=profile_form,
model=Profile,
active_nav='dropdown',
pk=pk)
@staff_member_required
def project(request, pk=None):
context = get_page_items(
request,
app_settings_model=AppSettings,
model=Project,
contact_model=Contact,
estimate_model=Estimate,
invoice_model=Invoice,
user_model=User,
order_by={'time': ('date', )}, # For time entries
time_model=Time,
pk=pk)
return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
return edit(
request,
form_model=ProjectForm,
model=Project,
client_model=Client,
active_nav='project',
pk=pk)
@staff_member_required
def project_index(request, pk=None):
context = get_index_items(
request,
Project,
active_nav='project',
app_settings_model=AppSettings,
columns_visible={'project': {
'notes': 'true',
}, },
edit_url='project_edit',
order_by=(
'-active',
'-updated', ),
search_fields=('id', 'name'),
show_search=True)
return render(request, 'project_index.html', context)
@staff_member_required
def proposal(request, pk=None):
context = get_page_items(
request, company_model=Company, model=Proposal, pk=pk)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=proposal-%s.pdf' % pk
return generate_pdf(
'pdf_proposal.html', context=context, file_object=response)
else:
return render(request, 'proposal.html', context)
@staff_member_required
def proposal_edit(request, pk=None):
"""
"""
return edit(
request,
form_model=ProposalForm,
model=Proposal,
active_nav='proposal',
company_model=Company,
pk=pk)
@staff_member_required
def proposal_index(request, pk=None):
context = get_index_items(
request,
Proposal,
active_nav='proposal',
app_settings_model=AppSettings,
order_by=('-updated', ),
show_search=True)
context['edit_url'] = 'proposal_edit'
return render(request, 'proposal_index.html', context)
@staff_member_required
def report(request, pk=None):
context = get_page_items(
request, model=Report, app_settings_model=AppSettings, pk=pk)
if context['pdf']:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=report-%s.pdf' % pk
return generate_pdf(
'pdf_report.html', context=context, file_object=response)
else:
return render(request, 'report.html', context)
@staff_member_required
def report_edit(request, pk=None):
return edit(
request,
form_model=ReportForm,
model=Report,
active_nav='dropdown',
invoice_model=Invoice,
pk=pk)
@staff_member_required
def report_index(request):
context = get_index_items(
request,
Report,
active_nav='dropdown',
app_settings_model=AppSettings,
edit_url='report_edit',
order_by=('-updated', '-active'),
search_fields=('id', 'name', 'gross', 'net'),
show_search=True)
return render(request, 'report_index.html', context)
# https://stackoverflow.com/a/42038839/185820
@staff_member_required(login_url='login')
def service_edit(request, pk=None):
return edit(
request,
form_model=ServiceForm,
model=Service,
active_nav='dropdown',
company_model=Company,
pk=pk)
@staff_member_required
def settings_app(request):
context = get_page_items(
request, model=AppSettings, app_settings_model=AppSettings)
return render(request, 'settings.html', context)
@staff_member_required
def settings_app_edit(request, pk=None):
return edit(
request,
form_model=AppSettingsForm,
model=AppSettings,
active_nav='dropdown',
pk=1)
@staff_member_required
def settings_company_edit(request, pk=None):
return edit(
request,
form_model=CompanyForm,
model=Company,
active_nav='dropdown',
pk=1)
@staff_member_required
def settings_company(request):
context = get_page_items(
request, app_settings_model=AppSettings, model=Company)
return render(request, 'company.html', context)
@staff_member_required
def settings_contract(request):
context = get_page_items(
request, model=ContractSettings, app_settings_model=AppSettings)
return render(request, 'contract_settings.html', context)
@staff_member_required
def settings_contract_edit(request, pk=None):
return edit(
request,
form_model=ContractSettingsForm,
model=ContractSettings,
pk=1,
active_nav='dropdown')
@staff_member_required
def task(request, pk=None):
context = get_page_items(
request, model=Task, app_settings_model=AppSettings, pk=pk)
return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
return edit(
request, form_model=TaskForm, model=Task, active_nav='task', pk=pk)
@staff_member_required
def task_index(request):
context = get_index_items(
request,
Task,
active_nav='task',
app_settings_model=AppSettings,
edit_url='task_edit',
order_by=('-updated', ),
search_fields=('name', ),
show_search=True)
return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
"""
Authenticate users can only see their own time entries unless
they are staff members.
"""
message = 'Sorry, you are not allowed to view that time entry.'
time_entry = get_object_or_404(Time, pk=pk)
# No user
if not time_entry.user and not request.user.is_staff:
messages.add_message(request, messages.WARNING, message)
return HttpResponseRedirect(reverse('home'))
# Time entry user does not match current user
elif (not time_entry.user.username == request.user.username and
not request.user.is_staff):
messages.add_message(request, messages.WARNING, message)
return HttpResponseRedirect(reverse('home'))
else:
context = get_page_items(
request,
app_settings_model=AppSettings,
model=Time,
profile_model=Profile,
pk=pk)
return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
if request.user.is_staff:
time_form = AdminTimeForm
else:
time_form = TimeForm
return edit(
request,
form_model=time_form,
model=Time,
active_nav='time',
invoice_model=Invoice,
estimate_model=Estimate,
project_model=Project,
task_model=Task,
time_model=Time,
pk=pk, )
@login_required
def time_index(request):
search_fields = ('client__name', 'date', 'log', 'pk', 'project__name',
'invoice__document_id', 'user__username')
context = get_index_items(
request,
Time,
active_nav='time',
app_settings_model=AppSettings,
columns_visible={
'time': {
'invoiced': 'true',
'invoice': 'true',
'estimate': 'true',
},
},
edit_url='time_edit',
order_by=('-updated', ),
search_fields=search_fields,
show_search=True)
if not request.user.is_staff:
return HttpResponseRedirect(reverse('login'))
else:
return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
if not request.user.pk == int(pk):
message = 'Sorry, you are not allowed to view that user.'
messages.add_message(request, messages.WARNING, message)
return HttpResponseRedirect(reverse('home'))
else:
order_by = {
'time': ('-updated', ),
'project': ('-updated', ),
}
context = get_page_items(
request,
app_settings_model=AppSettings,
contact_model=Contact,
model=User,
order_by=order_by,
profile_model=Profile,
project_model=Project,
time_model=Time,
pk=pk)
return render(request, 'user.html', context)
@staff_member_required
def user_index(request):
context = get_index_items(
request,
User,
active_nav='dropdown',
app_settings_model=AppSettings,
company_model=Company,
contact_model=Contact,
order_by=('-profile__active', '-profile__updated'),
show_search=False)
return render(request, 'user_index.html', context)
@staff_member_required
def plot(request):
"""
"""
|
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2008-2013 Sigasi
:license: BSD, see LICENSE for more details.
"""
from string import Template
import os
class LibraryMappingFileCreator:
"""A Library Mapping File Creator helps you to easily create a Sigasi Library Mapping file.
You can add library mappings by calling the add_mapping method.
To create the .library_mapping file content, simply call str() of your LibraryMappingFileCreator instance.
Typical example:
creator = LibraryMappingFileCreator()
creator.add_mapping(test.vhd, "myLib")
creator.add_mapping(Copy of test.vhd, "not mapped")
return str(creator)
"""
__LIBRARIES_TEMPLATE = Template(
'''<?xml version="1.0" encoding="UTF-8"?>
<com.sigasi.hdt.vhdl.scoping.librarymapping.model:LibraryMappings xmlns:com.sigasi.hdt.vhdl.scoping.librarymapping.model="com.sigasi.hdt.vhdl.scoping.librarymapping" Version="2">
$mappings</com.sigasi.hdt.vhdl.scoping.librarymapping.model:LibraryMappings>
''')
__MAPPING_TEMPLATE = Template(''' <Mappings Location="$path" Library="$library"/>
''')
__DEFAULT_MAPPINGS = {
"Common Libraries/IEEE":"ieee",
"Common Libraries/IEEE Synopsys":"ieee",
"Common Libraries":"not mapped",
"Common Libraries/STD":"std"
}
def __init__(self):
self.__entries = dict()
self.__add_default_mappings()
def __add_default_mappings(self):
for path, library in self.__DEFAULT_MAPPINGS.items():
self.add_mapping(path, library)
def __str__(self):
mappings = ""
for (path, library) in sorted(self.__entries.items()):
mappings += self.__MAPPING_TEMPLATE.substitute(
path=path,
library=library)
return self.__LIBRARIES_TEMPLATE.substitute(mappings=mappings)
def add_mapping(self, path, library):
self.__entries[path] = library
def unmap(self, path):
self.__entries[path] = "not mapped"
def write(self, destination):
library_mapping_file = os.path.join(destination, ".library_mapping.xml")
f = open(library_mapping_file, 'wb')
try:
f.write(str(self))
finally:
f.close()
from string import Template
import os
class ProjectFileCreator():
"""A Project File Creator helps you to easily create a Sigasi Project file.
You can specify the VHDL version (93,2002 or 2008) in the constructor.
You can add linked resources to your project by calling the add_link method.
To create the .project file, simply call str() of your ProjectFileCreator instance.
Typical example:
creator = ProjectFileCreator(project_name)
creator.add_link("test.vhd", "/home/heeckhau/shared/test.vhd")
creator.write("/home/heeckhau/test/")
"""
__LINK_TEMPLATE = Template(
'''\t\t<link>
\t\t\t<name>$name</name>
\t\t\t<type>$link_type</type>
\t\t\t<$loc_type>$location</$loc_type>
\t\t</link>
''')
__PROJECT_REFERENCE_TEMPLATE = Template(
'''\t\t<project>$name</project>\n'''
)
__PROJECT_FILE_TEMPLATE = Template(
'''<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
\t<name>${project_name}</name>
\t<comment></comment>
\t<projects>
${project_references}\t</projects>
\t<buildSpec>
\t\t<buildCommand>
\t\t\t<name>org.eclipse.xtext.ui.shared.xtextBuilder</name>
\t\t\t<arguments>
\t\t\t</arguments>
\t\t</buildCommand>
\t</buildSpec>
\t<natures>
\t\t<nature>com.sigasi.hdt.vhdl.ui.vhdlNature</nature>
\t\t<nature>org.eclipse.xtext.ui.shared.xtextNature</nature>
\t</natures>
\t<linkedResources>
${links}\t</linkedResources>
</projectDescription>''')
__DEFAULT_LINKS=[
["Common Libraries",Template("virtual:/virtual")],
["Common Libraries/IEEE",Template("sigasiresource:/vhdl/${version}/IEEE")],
["Common Libraries/IEEE Synopsys",Template("sigasiresource:/vhdl/${version}/IEEE%20Synopsys")],
["Common Libraries/STD",Template("sigasiresource:/vhdl/${version}/STD")],
]
def __init__(self, project_name, version=93):
if version not in {93, 2002, 2008}:
raise ValueError('Only 93, 2002 and 2008 are allowed as VHDL version number')
self.__project_name = project_name
self.__version = version
self.__links = []
self.__project_references = []
self.__add_default_links()
def __add_default_links(self):
for name, template in self.__DEFAULT_LINKS:
self.__links.append([name, template.substitute(version=self.__version), 2, False])
def __str__(self):
links = ""
project_references=""
for [name, location, link_type, is_path] in self.__links:
location_type = "location" if is_path else "locationURI"
links += self.__LINK_TEMPLATE.substitute(
name=name,
link_type=link_type,
loc_type=location_type,
location=location)
for project_reference in self.__project_references:
project_references += self.__PROJECT_REFERENCE_TEMPLATE.substitute(
name=project_reference)
return self.__PROJECT_FILE_TEMPLATE.substitute(
project_name = self.__project_name,
project_references=project_references,
links=links
)
def add_link(self, name, location, link_type=1):
if link_type not in {1, 2}:
raise ValueError('Only types 1 and 2 are allowed. 1 is file, 2 is folder')
if name.startswith(".."):
raise ValueError('invalid name "' + name + '", a name can not start with dots')
self.__links.append([name, location, link_type, True])
def add_project_reference(self, name):
self.__project_references.append(name)
def write(self, destination):
project_file = os.path.join(destination, ".project")
f = open(project_file, 'wb')
try:
f.write(str(self))
finally:
f.close()
class SigasiProjectCreator():
"""This class helps you to easily create a Sigasi project (".project")
and library mapping (".library_mapping.xml") file.
Typical example:
creator = SigasiProjectCreator(project_name, 93)
creator.add_link("test.vhd", "/home/heeckhau/shared/test.vhd")
creator.add_mapping("test.vhd", "myLib")
creator.write("/home/heeckhau/test/")
"""
def __init__(self, project_name, version=93):
self.__libraryMappingFileCreator = LibraryMappingFileCreator()
self.__projectFileCreator = ProjectFileCreator(project_name, version)
def add_link(self, name, location, link_type=1):
location = location.replace("\\","/")
if link_type not in {1, 2}:
raise ValueError('Only types 1 and 2 are allowed. 1 is file, 2 is folder')
self.__projectFileCreator.add_link(name, location, link_type)
def add_mapping(self, path, library):
path = path.replace("\\","/")
self.__libraryMappingFileCreator.add_mapping(path, library)
def unmap(self, path):
path = path.replace("\\","/")
self.__libraryMappingFileCreator.unmap(path)
def write(self, destination):
self.__projectFileCreator.write(destination)
self.__libraryMappingFileCreator.write(destination)
def add_unisim(self, unisim_location):
self.__projectFileCreator.add_link("Common Libraries/unisim", unisim_location, 2)
self.__libraryMappingFileCreator.add_mapping("Common Libraries/unisim","unisims")
self.__libraryMappingFileCreator.unmap("Common Libraries/unisim/primitive")
self.__libraryMappingFileCreator.unmap("Common Libraries/unisim/secureip")
def add_unimacro(self, unimacro_location):
self.__projectFileCreator.add_link("Common Libraries/unimacro", unimacro_location, 2)
self.__libraryMappingFileCreator.add_mapping("Common Libraries/unimacro/unimacro_VCOMP.vhd", "unimacro")
def add_project_reference(self, name):
self.__projectFileCreator.add_project_reference(name)
Fixed inconsistent indentation that cause Python errors
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2008-2013 Sigasi
:license: BSD, see LICENSE for more details.
"""
from string import Template
import os
class LibraryMappingFileCreator:
"""A Library Mapping File Creator helps you to easily create a Sigasi Library Mapping file.
You can add library mappings by calling the add_mapping method.
To create the .library_mapping file content, simply call str() of your LibraryMappingFileCreator instance.
Typical example:
creator = LibraryMappingFileCreator()
creator.add_mapping(test.vhd, "myLib")
creator.add_mapping(Copy of test.vhd, "not mapped")
return str(creator)
"""
__LIBRARIES_TEMPLATE = Template(
'''<?xml version="1.0" encoding="UTF-8"?>
<com.sigasi.hdt.vhdl.scoping.librarymapping.model:LibraryMappings xmlns:com.sigasi.hdt.vhdl.scoping.librarymapping.model="com.sigasi.hdt.vhdl.scoping.librarymapping" Version="2">
$mappings</com.sigasi.hdt.vhdl.scoping.librarymapping.model:LibraryMappings>
''')
__MAPPING_TEMPLATE = Template(''' <Mappings Location="$path" Library="$library"/>
''')
__DEFAULT_MAPPINGS = {
"Common Libraries/IEEE":"ieee",
"Common Libraries/IEEE Synopsys":"ieee",
"Common Libraries":"not mapped",
"Common Libraries/STD":"std"
}
def __init__(self):
self.__entries = dict()
self.__add_default_mappings()
def __add_default_mappings(self):
for path, library in self.__DEFAULT_MAPPINGS.items():
self.add_mapping(path, library)
def __str__(self):
mappings = ""
for (path, library) in sorted(self.__entries.items()):
mappings += self.__MAPPING_TEMPLATE.substitute(
path=path,
library=library)
return self.__LIBRARIES_TEMPLATE.substitute(mappings=mappings)
def add_mapping(self, path, library):
self.__entries[path] = library
def unmap(self, path):
self.__entries[path] = "not mapped"
def write(self, destination):
library_mapping_file = os.path.join(destination, ".library_mapping.xml")
f = open(library_mapping_file, 'wb')
try:
f.write(str(self))
finally:
f.close()
from string import Template
import os
class ProjectFileCreator():
"""A Project File Creator helps you to easily create a Sigasi Project file.
You can specify the VHDL version (93,2002 or 2008) in the constructor.
You can add linked resources to your project by calling the add_link method.
To create the .project file, simply call str() of your ProjectFileCreator instance.
Typical example:
creator = ProjectFileCreator(project_name)
creator.add_link("test.vhd", "/home/heeckhau/shared/test.vhd")
creator.write("/home/heeckhau/test/")
"""
__LINK_TEMPLATE = Template(
'''\t\t<link>
\t\t\t<name>$name</name>
\t\t\t<type>$link_type</type>
\t\t\t<$loc_type>$location</$loc_type>
\t\t</link>
''')
__PROJECT_REFERENCE_TEMPLATE = Template(
'''\t\t<project>$name</project>\n'''
)
__PROJECT_FILE_TEMPLATE = Template(
'''<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
\t<name>${project_name}</name>
\t<comment></comment>
\t<projects>
${project_references}\t</projects>
\t<buildSpec>
\t\t<buildCommand>
\t\t\t<name>org.eclipse.xtext.ui.shared.xtextBuilder</name>
\t\t\t<arguments>
\t\t\t</arguments>
\t\t</buildCommand>
\t</buildSpec>
\t<natures>
\t\t<nature>com.sigasi.hdt.vhdl.ui.vhdlNature</nature>
\t\t<nature>org.eclipse.xtext.ui.shared.xtextNature</nature>
\t</natures>
\t<linkedResources>
${links}\t</linkedResources>
</projectDescription>''')
__DEFAULT_LINKS=[
["Common Libraries",Template("virtual:/virtual")],
["Common Libraries/IEEE",Template("sigasiresource:/vhdl/${version}/IEEE")],
["Common Libraries/IEEE Synopsys",Template("sigasiresource:/vhdl/${version}/IEEE%20Synopsys")],
["Common Libraries/STD",Template("sigasiresource:/vhdl/${version}/STD")],
]
def __init__(self, project_name, version=93):
if version not in {93, 2002, 2008}:
raise ValueError('Only 93, 2002 and 2008 are allowed as VHDL version number')
self.__project_name = project_name
self.__version = version
self.__links = []
self.__project_references = []
self.__add_default_links()
def __add_default_links(self):
for name, template in self.__DEFAULT_LINKS:
self.__links.append([name, template.substitute(version=self.__version), 2, False])
def __str__(self):
links = ""
project_references=""
for [name, location, link_type, is_path] in self.__links:
location_type = "location" if is_path else "locationURI"
links += self.__LINK_TEMPLATE.substitute(
name=name,
link_type=link_type,
loc_type=location_type,
location=location)
for project_reference in self.__project_references:
project_references += self.__PROJECT_REFERENCE_TEMPLATE.substitute(
name=project_reference)
return self.__PROJECT_FILE_TEMPLATE.substitute(
project_name = self.__project_name,
project_references=project_references,
links=links
)
def add_link(self, name, location, link_type=1):
if link_type not in {1, 2}:
raise ValueError('Only types 1 and 2 are allowed. 1 is file, 2 is folder')
if name.startswith(".."):
raise ValueError('invalid name "' + name + '", a name can not start with dots')
self.__links.append([name, location, link_type, True])
def add_project_reference(self, name):
self.__project_references.append(name)
def write(self, destination):
project_file = os.path.join(destination, ".project")
f = open(project_file, 'wb')
try:
f.write(str(self))
finally:
f.close()
class SigasiProjectCreator():
"""This class helps you to easily create a Sigasi project (".project")
and library mapping (".library_mapping.xml") file.
Typical example:
creator = SigasiProjectCreator(project_name, 93)
creator.add_link("test.vhd", "/home/heeckhau/shared/test.vhd")
creator.add_mapping("test.vhd", "myLib")
creator.write("/home/heeckhau/test/")
"""
def __init__(self, project_name, version=93):
self.__libraryMappingFileCreator = LibraryMappingFileCreator()
self.__projectFileCreator = ProjectFileCreator(project_name, version)
def add_link(self, name, location, link_type=1):
location = location.replace("\\","/")
if link_type not in {1, 2}:
raise ValueError('Only types 1 and 2 are allowed. 1 is file, 2 is folder')
self.__projectFileCreator.add_link(name, location, link_type)
def add_mapping(self, path, library):
path = path.replace("\\","/")
self.__libraryMappingFileCreator.add_mapping(path, library)
def unmap(self, path):
path = path.replace("\\","/")
self.__libraryMappingFileCreator.unmap(path)
def write(self, destination):
self.__projectFileCreator.write(destination)
self.__libraryMappingFileCreator.write(destination)
def add_unisim(self, unisim_location):
self.__projectFileCreator.add_link("Common Libraries/unisim", unisim_location, 2)
self.__libraryMappingFileCreator.add_mapping("Common Libraries/unisim","unisims")
self.__libraryMappingFileCreator.unmap("Common Libraries/unisim/primitive")
self.__libraryMappingFileCreator.unmap("Common Libraries/unisim/secureip")
def add_unimacro(self, unimacro_location):
self.__projectFileCreator.add_link("Common Libraries/unimacro", unimacro_location, 2)
self.__libraryMappingFileCreator.add_mapping("Common Libraries/unimacro/unimacro_VCOMP.vhd", "unimacro")
def add_project_reference(self, name):
self.__projectFileCreator.add_project_reference(name)
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.conf import settings
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import EstimateForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import TaskForm
from .models import Client
from .models import Company
from .models import Contact
from .models import Estimate
from .models import Invoice
from .models import Profile
from .models import Project
from .models import Task
from .models import Time
from .utils import add_user_to_contacts
from .utils import dashboard_total
from .utils import edit
from .utils import entries_total
from .utils import last_month
from .utils import search
# Create your views here.
@staff_member_required
def client(request, pk=None):
context = {}
client = get_object_or_404(Client, pk=pk)
contacts = Contact.objects.filter(client=client, active=True)
contacts = contacts.order_by('-pk')
projects = Project.objects.filter(client=client)
projects = projects.order_by('-start_date')
context['client'] = client
context['contacts'] = contacts
context['projects'] = projects
return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
kwargs = {}
url_name = 'client_index'
if pk:
kwargs['pk'] = pk
url_name = 'client'
return edit(request,
ClientForm,
Client,
url_name,
'client_edit.html',
kwargs=kwargs,
pk=pk)
@staff_member_required
def client_index(request):
context = {}
active = request.GET.get('active')
if active:
context['active'] = True
fields = ('address', 'name')
order_by = '-pk'
context, items = search(request,
Client,
fields,
order_by=order_by,
context=context)
context['items'] = items
return render(request, 'client_index.html', context)
@staff_member_required
def company_edit(request, pk=None):
return edit(request,
CompanyForm,
Company,
'company',
'company_edit.html',
pk=1)
@staff_member_required
def company(request):
context = {}
company = Company.get_solo()
context['company'] = company
return render(request, 'company.html', context)
@staff_member_required
def contact(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
context['contact'] = contact
return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
url_name = 'contact_index'
kwargs = {}
if pk:
kwargs['pk'] = pk
url_name = 'contact'
client = request.GET.get('client')
if client:
client = get_object_or_404(Client, pk=client)
url_name = 'client_index'
return edit(request,
ContactForm,
Contact,
url_name,
'contact_edit.html',
client=client,
kwargs=kwargs,
pk=pk)
@staff_member_required
def contact_index(request):
context = {}
active = request.GET.get('active')
if active:
context['active'] = True
fields = ('first_name', 'last_name', 'email', 'notes')
order_by = '-pk'
context, items = search(request,
Contact,
fields,
order_by=order_by,
context=context)
context['items'] = items
return render(request, 'contact_index.html', context)
@staff_member_required
def contact_mail(request, pk=None):
context = {}
recipients = []
contact = get_object_or_404(Contact, pk=pk)
if request.method == 'POST':
form = MailForm(request.POST)
if form.is_valid():
sender = settings.DEFAULT_FROM_EMAIL
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
recipients.append(contact.email)
send_mail(subject,
message,
sender,
recipients,
fail_silently=False)
messages.add_message(request, messages.SUCCESS, 'Message sent!')
return HttpResponseRedirect(reverse('contact_index'))
else:
form = MailForm()
context['form'] = form
context['contact'] = contact
return render(request, 'contact_mail.html', context)
@staff_member_required
def estimate(request, pk=None):
context = {}
company = Company.get_solo()
if company:
context['company'] = company
estimate = get_object_or_404(Estimate, pk=pk)
document_id = str(estimate.document_id)
document_type = estimate._meta.verbose_name
document_type_upper = document_type.upper()
document_type_title = document_type.title()
context['document'] = estimate
context['document_type_upper'] = document_type_upper
context['document_type_title'] = document_type_title
times_client = Time.objects.filter(client=estimate.client,
estimate=None,
project=None)
times_estimate = Time.objects.filter(estimate=estimate)
times = times_client | times_estimate
times = times.order_by('-pk')
entries, subtotal, paid_amount, hours, amount = entries_total(times)
context['entries'] = entries
context['amount'] = amount
context['paid_amount'] = paid_amount
context['subtotal'] = subtotal
context['hours'] = hours
pdf = request.GET.get('pdf')
context['pdf'] = pdf
if pdf:
company_name = ''
if company.name:
company_name = company.name.replace('.', '_')
company_name = company_name.replace(', ', '_')
company_name = company_name.upper()
response = HttpResponse(content_type='application/pdf')
filename = '_'.join([document_type_upper, document_id, company_name])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf('entry_table.html',
context=context,
file_object=response)
else:
return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
amount = request.GET.get('amount')
paid_amount = request.GET.get('paid_amount')
subtotal = request.GET.get('subtotal')
times = request.GET.get('times')
company = Company.get_solo()
if times:
estimate = get_object_or_404(Estimate, pk=pk)
times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
for entry in times:
entry.estimate = estimate
entry.save()
return edit(request,
EstimateForm,
Estimate,
'estimate_index',
'estimate_edit.html',
amount=amount,
paid_amount=paid_amount,
pk=pk,
subtotal=subtotal,
company=company)
@staff_member_required
def estimate_index(request):
context = {}
fields = ('subject', )
order_by = '-pk'
context, items = search(request, Estimate, fields, order_by=order_by)
context['items'] = items
return render(request, 'estimate_index.html', context)
def home(request):
context = {}
clients = Client.objects.all()
clients_active = Client.objects.filter(active=True)
company = Company.get_solo()
contacts = Contact.objects.all()
contacts_active = Contact.objects.filter(active=True)
projects = Project.objects.all()
projects_active = Project.objects.filter(active=True)
projects_active = projects_active.order_by('-start_date')
tasks = Task.objects.all()
tasks_active = Task.objects.filter(active=True)
times = Time.objects.all()
invoices = Invoice.objects.all()
invoices_active = Invoice.objects.filter(
issue_date__gt=last_month()).order_by('-document_id')
gross, net = dashboard_total(invoices_active)
estimates = Estimate.objects.all()
context['clients'] = clients
context['clients_active'] = clients_active
context['company'] = company
context['contacts'] = contacts
context['contacts_active'] = contacts_active
context['projects'] = projects
context['projects_active'] = projects_active
context['tasks'] = tasks
context['tasks_active'] = tasks_active
context['times'] = times
context['invoices'] = invoices
context['invoices_active'] = invoices_active
context['gross'] = gross
context['net'] = net
context['estimates'] = estimates
context['request'] = request
return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
context = {}
company = Company.get_solo()
if company:
context['company'] = company
invoice = get_object_or_404(Invoice, pk=pk)
document_id = str(invoice.document_id)
document_type = invoice._meta.verbose_name
document_type_upper = document_type.upper()
document_type_title = document_type.title()
context['document'] = invoice
context['document_type_upper'] = document_type_upper
context['document_type_title'] = document_type_title
times_project = Time.objects.filter(invoiced=False,
project=invoice.project,
invoice=None)
times_invoice = Time.objects.filter(invoice=invoice)
times = times_project | times_invoice
times = times.order_by('-pk')
entries, subtotal, paid_amount, hours, amount = entries_total(times)
context['entries'] = entries
context['amount'] = amount
context['paid_amount'] = paid_amount
context['subtotal'] = subtotal
context['hours'] = hours
context['invoice'] = True
pdf = request.GET.get('pdf')
context['pdf'] = pdf
if pdf:
response = HttpResponse(content_type='application/pdf')
if company.name:
company_name = company.name.replace('.', '_')
company_name = company_name.replace(', ', '_')
company_name = company_name.upper()
filename = '_'.join([document_type_upper, document_id, company_name])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf('entry_table.html',
context=context,
file_object=response)
else:
return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, pk=None):
amount = request.GET.get('amount')
paid_amount = request.GET.get('paid_amount')
subtotal = request.GET.get('subtotal')
times = request.GET.get('times')
company = Company.get_solo()
if pk:
invoice = get_object_or_404(Invoice, pk=pk)
if invoice.project:
if invoice.project.client and not invoice.client:
invoice.client = invoice.project.client
invoice.save()
if times:
invoice = get_object_or_404(Invoice, pk=pk)
times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
for entry in times:
entry.invoice = invoice
entry.save()
return edit(request,
InvoiceForm,
Invoice,
'invoice_index',
'invoice_edit.html',
amount=amount,
paid_amount=paid_amount,
pk=pk,
subtotal=subtotal,
company=company)
@staff_member_required
def invoice_index(request):
context = {}
fields = ('client__name',
'document_id',
'issue_date',
'project__name',
'subject', )
order_by = '-issue_date'
context, items = search(request, Invoice, fields, order_by=order_by)
context['items'] = items
return render(request, 'invoice_index.html', context)
@staff_member_required
def project(request, pk=None):
context = {}
project = get_object_or_404(Project, pk=pk)
times = Time.objects.filter(project=project).order_by('-date')
context['project'] = project
context['times'] = times
return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
url_name = 'project_index'
kwargs = {}
if pk:
kwargs['pk'] = pk
url_name = 'project'
client = request.GET.get('client')
if client:
client = get_object_or_404(Client, pk=client)
url_name = 'client_index'
return edit(request,
ProjectForm,
Project,
url_name,
'project_edit.html',
client=client,
kwargs=kwargs,
pk=pk)
@staff_member_required
def project_index(request, pk=None):
context = {}
active = request.GET.get('active')
if active:
context['active'] = True
fields = ('id', 'name')
order_by = '-start_date'
context, items = search(request,
Project,
fields,
order_by=order_by,
context=context)
context['items'] = items
return render(request, 'project_index.html', context)
@staff_member_required
def task(request, pk=None):
context = {}
task = get_object_or_404(Task, pk=pk)
context['task'] = task
return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
kwargs = {}
url_name = 'task_index'
if pk:
kwargs['pk'] = pk
url_name = 'task'
return edit(request,
TaskForm,
Task,
url_name,
'task_edit.html',
pk=pk,
kwargs=kwargs)
@staff_member_required
def task_index(request):
context = {}
order_by = '-pk'
fields = ('name', )
context, items = search(request, Task, fields, order_by=order_by)
context['items'] = items
return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
context = {}
entry = get_object_or_404(Time, pk=pk)
if not entry.user and not request.user.is_staff:
return HttpResponseRedirect(reverse('admin:index'))
if entry.user:
if (not entry.user.username == request.user.username and
not request.user.is_staff):
return HttpResponseRedirect(reverse('admin:index'))
context['entry'] = entry
return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
url_name = 'entry_index'
kwargs = {}
if pk:
kwargs['pk'] = pk
url_name = 'entry'
client = request.GET.get('client')
project = request.GET.get('project')
task = None
if client:
client = get_object_or_404(Client, pk=client)
if project:
project = get_object_or_404(Project, pk=project)
if project.task:
task = get_object_or_404(Task, pk=project.task.pk)
projects = Project.objects.filter(team=request.user.pk)
clients = Client.objects.filter(
pk__in=[i.client.pk for i in projects if i.client])
tasks = Task.objects.filter(pk__in=[i.task.pk for i in projects if i.task])
if request.user.is_staff:
from .forms import TimeAdminForm as TimeForm
else:
from .forms import TimeForm
return edit(request,
TimeForm,
Time,
url_name,
'time_edit.html',
client=client,
clients=clients,
pk=pk,
project=project,
projects=projects,
task=task,
tasks=tasks,
kwargs=kwargs)
@login_required
def time_index(request):
context = {}
fields = ('client__name', 'date', 'notes', 'pk', 'project__name',
'user__username')
order_by = '-pk'
context, items = search(request, Time, fields, order_by=order_by)
context['items'] = items
return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
context = {}
user = get_object_or_404(User, pk=pk)
profile = Profile.objects.get_or_create(user=user)[0]
context['profile'] = profile
context['request'] = request
context['user'] = user
if request.user.pk == int(pk) or request.user.is_staff:
return render(request, 'user.html', context)
else:
return HttpResponseRedirect(reverse('home'))
@staff_member_required
def user_contact(request, pk=None):
return add_user_to_contacts(request, Contact, pk=pk)
@login_required
def user_edit(request, pk=None):
context = {}
user = get_object_or_404(User, pk=pk)
context['user'] = user
return edit(request,
ProfileForm,
Profile,
'home',
'user_edit.html',
pk=pk,
context=context)
@staff_member_required
def user_index(request):
context = {}
items = User.objects.all()
context['items'] = items
return render(request, 'user_index.html', context)
Update
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.conf import settings
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import EstimateForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import TaskForm
from .models import Client
from .models import Company
from .models import Contact
from .models import Estimate
from .models import Invoice
from .models import Profile
from .models import Project
from .models import Task
from .models import Time
from .utils import add_user_to_contacts
from .utils import dashboard_total
from .utils import edit
from .utils import entries_total
from .utils import last_month
from .utils import search
# Create your views here.
@staff_member_required
def client(request, pk=None):
context = {}
client = get_object_or_404(Client, pk=pk)
contacts = Contact.objects.filter(client=client, active=True)
contacts = contacts.order_by('-pk')
projects = Project.objects.filter(client=client)
projects = projects.order_by('-start_date')
context['client'] = client
context['contacts'] = contacts
context['projects'] = projects
return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
kwargs = {}
url_name = 'client_index'
if pk:
kwargs['pk'] = pk
url_name = 'client'
return edit(request,
ClientForm,
Client,
url_name,
'client_edit.html',
kwargs=kwargs,
pk=pk)
@staff_member_required
def client_index(request):
context = {}
active = request.GET.get('active')
if active:
context['active'] = True
fields = ('address', 'name')
order_by = '-pk'
context, items = search(request,
Client,
fields,
order_by=order_by,
context=context)
context['items'] = items
return render(request, 'client_index.html', context)
@staff_member_required
def company_edit(request, pk=None):
return edit(request,
CompanyForm,
Company,
'company',
'company_edit.html',
pk=1)
@staff_member_required
def company(request):
context = {}
company = Company.get_solo()
context['company'] = company
return render(request, 'company.html', context)
@staff_member_required
def contact(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
context['contact'] = contact
return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
url_name = 'contact_index'
kwargs = {}
if pk:
kwargs['pk'] = pk
url_name = 'contact'
client = request.GET.get('client')
if client:
client = get_object_or_404(Client, pk=client)
url_name = 'client_index'
return edit(request,
ContactForm,
Contact,
url_name,
'contact_edit.html',
client=client,
kwargs=kwargs,
pk=pk)
@staff_member_required
def contact_index(request):
context = {}
active = request.GET.get('active')
if active:
context['active'] = True
fields = ('first_name', 'last_name', 'email', 'notes')
order_by = '-pk'
context, items = search(request,
Contact,
fields,
order_by=order_by,
context=context)
context['items'] = items
return render(request, 'contact_index.html', context)
@staff_member_required
def contact_mail(request, pk=None):
context = {}
recipients = []
contact = get_object_or_404(Contact, pk=pk)
if request.method == 'POST':
form = MailForm(request.POST)
if form.is_valid():
sender = settings.DEFAULT_FROM_EMAIL
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
recipients.append(contact.email)
send_mail(subject,
message,
sender,
recipients,
fail_silently=False)
messages.add_message(request, messages.SUCCESS, 'Message sent!')
return HttpResponseRedirect(reverse('contact_index'))
else:
form = MailForm()
context['form'] = form
context['contact'] = contact
return render(request, 'contact_mail.html', context)
@staff_member_required
def estimate(request, pk=None):
context = {}
company = Company.get_solo()
if company:
context['company'] = company
estimate = get_object_or_404(Estimate, pk=pk)
document_id = str(estimate.document_id)
document_type = estimate._meta.verbose_name
document_type_upper = document_type.upper()
document_type_title = document_type.title()
context['document'] = estimate
context['document_type_upper'] = document_type_upper
context['document_type_title'] = document_type_title
times_client = Time.objects.filter(client=estimate.client,
estimate=None,
project=None)
times_estimate = Time.objects.filter(estimate=estimate)
times = times_client | times_estimate
times = times.order_by('-pk')
entries, subtotal, paid_amount, hours, amount = entries_total(times)
context['entries'] = entries
context['amount'] = amount
context['paid_amount'] = paid_amount
context['subtotal'] = subtotal
context['hours'] = hours
pdf = request.GET.get('pdf')
context['pdf'] = pdf
if pdf:
company_name = ''
if company.name:
company_name = company.name.replace('.', '_')
company_name = company_name.replace(', ', '_')
company_name = company_name.upper()
response = HttpResponse(content_type='application/pdf')
filename = '_'.join([document_type_upper, document_id, company_name])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf('entry_table.html',
context=context,
file_object=response)
else:
return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
amount = request.GET.get('amount')
paid_amount = request.GET.get('paid_amount')
subtotal = request.GET.get('subtotal')
times = request.GET.get('times')
company = Company.get_solo()
if times:
estimate = get_object_or_404(Estimate, pk=pk)
times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
for entry in times:
entry.estimate = estimate
entry.save()
return edit(request,
EstimateForm,
Estimate,
'estimate_index',
'estimate_edit.html',
amount=amount,
paid_amount=paid_amount,
pk=pk,
subtotal=subtotal,
company=company)
@staff_member_required
def estimate_index(request):
context = {}
fields = ('subject', )
order_by = '-pk'
context, items = search(request, Estimate, fields, order_by=order_by)
context['items'] = items
return render(request, 'estimate_index.html', context)
def home(request):
context = {}
clients = Client.objects.all()
clients_active = Client.objects.filter(active=True)
company = Company.get_solo()
contacts = Contact.objects.all()
contacts_active = Contact.objects.filter(active=True)
projects = Project.objects.all()
projects_active = Project.objects.filter(active=True)
projects_active = projects_active.order_by('-start_date')
tasks = Task.objects.all()
tasks_active = Task.objects.filter(active=True)
times = Time.objects.all()
invoices = Invoice.objects.all()
invoices_active = Invoice.objects.filter(
issue_date__gt=last_month()).order_by('-document_id')
gross, net = dashboard_total(invoices_active)
estimates = Estimate.objects.all()
context['clients'] = clients
context['clients_active'] = clients_active
context['company'] = company
context['contacts'] = contacts
context['contacts_active'] = contacts_active
context['projects'] = projects
context['projects_active'] = projects_active
context['tasks'] = tasks
context['tasks_active'] = tasks_active
context['times'] = times
context['invoices'] = invoices
context['invoices_active'] = invoices_active
context['gross'] = gross
context['net'] = net
context['estimates'] = estimates
context['request'] = request
return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
context = {}
company = Company.get_solo()
if company:
context['company'] = company
invoice = get_object_or_404(Invoice, pk=pk)
document_id = str(invoice.document_id)
document_type = invoice._meta.verbose_name
document_type_upper = document_type.upper()
document_type_title = document_type.title()
context['document'] = invoice
context['document_type_upper'] = document_type_upper
context['document_type_title'] = document_type_title
times_project = Time.objects.filter(invoiced=False,
project=invoice.project,
invoice=None)
times_invoice = Time.objects.filter(invoice=invoice)
times = times_project | times_invoice
times = times.order_by('-pk')
entries, subtotal, paid_amount, hours, amount = entries_total(times)
context['entries'] = entries
context['amount'] = amount
context['paid_amount'] = paid_amount
context['subtotal'] = subtotal
context['hours'] = hours
context['invoice'] = True
pdf = request.GET.get('pdf')
context['pdf'] = pdf
if pdf:
response = HttpResponse(content_type='application/pdf')
if company.name:
company_name = company.name.replace('.', '_')
company_name = company_name.replace(', ', '_')
company_name = company_name.upper()
filename = '_'.join([document_type_upper, document_id, company_name])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf('entry_table.html',
context=context,
file_object=response)
else:
return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, pk=None):
amount = request.GET.get('amount')
paid_amount = request.GET.get('paid_amount')
subtotal = request.GET.get('subtotal')
times = request.GET.get('times')
company = Company.get_solo()
if pk:
invoice = get_object_or_404(Invoice, pk=pk)
if invoice.project:
if invoice.project.client and not invoice.client:
invoice.client = invoice.project.client
invoice.save()
if times:
invoice = get_object_or_404(Invoice, pk=pk)
times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
for entry in times:
entry.invoice = invoice
entry.save()
return edit(request,
InvoiceForm,
Invoice,
'invoice_index',
'invoice_edit.html',
amount=amount,
paid_amount=paid_amount,
pk=pk,
subtotal=subtotal,
company=company)
@staff_member_required
def invoice_index(request):
context = {}
fields = ('client__name',
'document_id',
'issue_date',
'project__name',
'subject', )
order_by = '-issue_date'
context, items = search(request, Invoice, fields, order_by=order_by)
context['items'] = items
return render(request, 'invoice_index.html', context)
@staff_member_required
def project(request, pk=None):
context = {}
project = get_object_or_404(Project, pk=pk)
times = Time.objects.filter(project=project).order_by('-date')
context['project'] = project
context['times'] = times
return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
url_name = 'project_index'
kwargs = {}
if pk:
kwargs['pk'] = pk
url_name = 'project'
client = request.GET.get('client')
if client:
client = get_object_or_404(Client, pk=client)
url_name = 'client_index'
return edit(request,
ProjectForm,
Project,
url_name,
'project_edit.html',
client=client,
kwargs=kwargs,
pk=pk)
@staff_member_required
def project_index(request, pk=None):
context = {}
active = request.GET.get('active')
if active:
context['active'] = True
fields = ('id', 'name')
order_by = '-start_date'
context, items = search(request,
Project,
fields,
order_by=order_by,
context=context)
context['items'] = items
return render(request, 'project_index.html', context)
@staff_member_required
def task(request, pk=None):
context = {}
task = get_object_or_404(Task, pk=pk)
context['task'] = task
return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
kwargs = {}
url_name = 'task_index'
if pk:
kwargs['pk'] = pk
url_name = 'task'
return edit(request,
TaskForm,
Task,
url_name,
'task_edit.html',
pk=pk,
kwargs=kwargs)
@staff_member_required
def task_index(request):
context = {}
active = request.GET.get('active')
if active:
context['active'] = True
order_by = '-pk'
fields = ('name', )
context, items = search(request, Task, fields, order_by=order_by)
context['items'] = items
return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
context = {}
entry = get_object_or_404(Time, pk=pk)
if not entry.user and not request.user.is_staff:
return HttpResponseRedirect(reverse('admin:index'))
if entry.user:
if (not entry.user.username == request.user.username and
not request.user.is_staff):
return HttpResponseRedirect(reverse('admin:index'))
context['entry'] = entry
return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
url_name = 'entry_index'
kwargs = {}
if pk:
kwargs['pk'] = pk
url_name = 'entry'
client = request.GET.get('client')
project = request.GET.get('project')
task = None
if client:
client = get_object_or_404(Client, pk=client)
if project:
project = get_object_or_404(Project, pk=project)
if project.task:
task = get_object_or_404(Task, pk=project.task.pk)
projects = Project.objects.filter(team=request.user.pk)
clients = Client.objects.filter(
pk__in=[i.client.pk for i in projects if i.client])
tasks = Task.objects.filter(pk__in=[i.task.pk for i in projects if i.task])
if request.user.is_staff:
from .forms import TimeAdminForm as TimeForm
else:
from .forms import TimeForm
return edit(request,
TimeForm,
Time,
url_name,
'time_edit.html',
client=client,
clients=clients,
pk=pk,
project=project,
projects=projects,
task=task,
tasks=tasks,
kwargs=kwargs)
@login_required
def time_index(request):
context = {}
fields = ('client__name', 'date', 'notes', 'pk', 'project__name',
'user__username')
order_by = '-pk'
context, items = search(request, Time, fields, order_by=order_by)
context['items'] = items
return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
context = {}
user = get_object_or_404(User, pk=pk)
profile = Profile.objects.get_or_create(user=user)[0]
context['profile'] = profile
context['request'] = request
context['user'] = user
if request.user.pk == int(pk) or request.user.is_staff:
return render(request, 'user.html', context)
else:
return HttpResponseRedirect(reverse('home'))
@staff_member_required
def user_contact(request, pk=None):
return add_user_to_contacts(request, Contact, pk=pk)
@login_required
def user_edit(request, pk=None):
context = {}
user = get_object_or_404(User, pk=pk)
context['user'] = user
return edit(request,
ProfileForm,
Profile,
'home',
'user_edit.html',
pk=pk,
context=context)
@staff_member_required
def user_index(request):
context = {}
items = User.objects.all()
context['items'] = items
return render(request, 'user_index.html', context)
|
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import ContractForm
from .forms import ContractSettingsForm
from .forms import EstimateForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NewsletterForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ProposalForm
from .forms import ReportForm
from .forms import SettingsForm
from .forms import TaskForm
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import Invoice
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import Settings
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import add_user_to_contacts
from .utils import index_items
from .utils import dashboard_totals
from .utils import edit
from .utils import entries_total
from .utils import generate_doc
from .utils import get_filename
from .utils import get_setting
from .utils import get_query
from .utils import get_url_name
from .utils import send_mail
from datetime import datetime
from django.conf import settings as django_settings
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import F, Sum
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from faker import Faker
from io import BytesIO
from matplotlib.dates import DateFormatter
from matplotlib.dates import MonthLocator
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Client.objects.filter(published=True).order_by('name')
serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Service.objects.filter(active=True).order_by('name')
serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Profile.objects.filter(
published=True).order_by('user__first_name')
serializer_class = ProfileSerializer
@staff_member_required
def client(request, pk=None):
context = {}
settings = Settings.get_solo()
client = get_object_or_404(Client, pk=pk)
contacts = Contact.objects.filter(client=client)
contacts = contacts.order_by('-pk')
contracts = Contract.objects.filter(client=client)
contracts = contracts.order_by('-updated')
projects = Project.objects.filter(client=client)
projects = projects.order_by('-start_date')
context['active_nav'] = 'client'
context['edit_url'] = 'client_edit'
context['icon_size'] = get_setting(request, settings, 'icon_size')
context['item'] = client
context['contacts'] = contacts
context['contracts'] = contracts
context['projects'] = projects
return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
kwargs, url_name = get_url_name('client', page_type='index_or_edit', pk=pk)
return edit(
request,
ClientForm,
Client,
url_name,
'client_edit.html',
active_nav='client',
kwargs=kwargs,
pk=pk)
@staff_member_required
def client_index(request):
search_fields = ('address', 'name')
settings = Settings.get_solo()
context = index_items(
request,
Client,
search_fields,
active_nav='client',
app_settings=settings,
edit_url='client_edit', # Delete modal
order_by=('-active', 'name'),
show_search=True)
return render(request, 'client_index.html', context)
@staff_member_required
def company_edit(request, pk=None):
return edit(
request, CompanyForm, Company, 'company', 'company_edit.html', pk=1)
@staff_member_required
def company(request):
context = {}
company = Company.get_solo()
context['company'] = company
context['active_tab'] = 'company'
return render(request, 'company.html', context)
@staff_member_required
def contact(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
context['active_nav'] = 'contact'
context['edit_url'] = 'contact_edit' # Delete modal
context['item'] = contact
return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
kwargs, url_name = get_url_name(
'contact', page_type='index_or_edit', pk=pk)
return edit(
request,
ContactForm,
Contact,
url_name,
'contact_edit.html',
active_nav='contact',
client=client,
kwargs=kwargs,
pk=pk)
@staff_member_required
def contact_index(request):
settings = Settings.get_solo()
search_fields = ('first_name', 'last_name', 'email', 'notes')
context = index_items(
request,
Contact,
search_fields,
active_nav='contact',
app_settings=settings,
edit_url='contact_edit', # Delete modal
order_by=('-active', 'first_name'),
show_search=True)
return render(request, 'contact_index.html', context)
@staff_member_required
def contact_mail(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
if request.method == 'POST':
form = MailForm(request.POST)
if form.is_valid():
test = form.cleaned_data['test']
if test:
fake = Faker()
subject = fake.text()
message = fake.text()
else:
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
url = reverse('contact_unsubscribe', kwargs={'pk': pk})
url = ''.join([request.get_host(), url])
to = contact.email
first_name = contact.first_name
if send_mail(
request,
subject,
message,
to,
url=url,
uuid=contact.uuid,
first_name=first_name):
messages.add_message(request, messages.SUCCESS, 'Mail sent!')
log = Log(entry='Mail sent to %s.' % to)
log.save()
return HttpResponseRedirect(reverse('contact', kwargs={'pk': pk}))
else:
form = MailForm()
context['active_nav'] = 'contact'
context['contact'] = contact
context['form'] = form
return render(request, 'contact_mail.html', context)
def contact_unsubscribe(request, pk=None):
contact = get_object_or_404(Contact, pk=pk)
uuid = request.GET.get('id')
if uuid == contact.uuid:
contact.subscribed = False
contact.save()
messages.add_message(request, messages.SUCCESS,
'You have been unsubscribed!')
log = Log(entry='%s unsubscribed.' % contact.email)
log.save()
return HttpResponseRedirect(reverse('home'))
else:
messages.add_message(request, messages.WARNING, 'Nothing to see here.')
return HttpResponseRedirect(reverse('home'))
@staff_member_required
def contract(request, pk=None):
"""
"""
doc = get_query(request, 'doc')
pdf = get_query(request, 'pdf')
company = Company.get_solo()
context = {}
contract = get_object_or_404(Contract, pk=pk)
context['active_nav'] = 'contract'
context['company'] = company
context['edit_url'] = 'contract_edit'
context['item'] = contract
context['pdf'] = pdf
# XXX In hindsight, this[1] is terrible. Maybe some OneToOne fields
# could clean this up.
# [1] i.e. The current implementation of time entry association with
# estimates & invoices for the purpose of "populating" those
# documents with line items.
estimate = contract.statement_of_work
if estimate:
times_client = Time.objects.filter(
client=estimate.client,
estimate=None,
project=None,
invoiced=False,
invoice=None)
times_estimate = Time.objects.filter(estimate=estimate)
times = times_client | times_estimate
times = times.order_by('-date')
else:
times = None
context['times'] = times
if pdf:
response = HttpResponse(content_type='application/pdf')
filename = get_filename(company)
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_contract.html', context=context, file_object=response)
if doc:
# https://stackoverflow.com/a/24122313/185820
document = generate_doc(contract)
filename = get_filename(company)
f = BytesIO()
document.save(f)
length = f.tell()
f.seek(0)
content_type = 'application/vnd.openxmlformats-'
content_type += 'officedocument.wordprocessingml.document'
response = HttpResponse(f.getvalue(), content_type=content_type)
response['Content-Disposition'] = 'filename=%s.docx' % filename
response['Content-Length'] = length
return response
else:
return render(request, 'contract.html', context)
@staff_member_required
def contract_edit(request, pk=None):
"""
"""
contract_settings = ContractSettings.get_solo()
kwargs, url_name = get_url_name(
'contract', page_type='index_or_edit', pk=pk)
return edit(
request,
ContractForm,
Contract,
url_name,
'contract_edit.html',
active_nav='contract',
contract_settings=contract_settings,
kwargs=kwargs,
pk=pk)
@staff_member_required
def contract_index(request):
"""
"""
settings = Settings.get_solo()
search_fields = ()
context = index_items(
request,
Contract,
search_fields,
active_nav='contract',
app_settings=settings,
order_by=('-created', ))
return render(request, 'contract_index.html', context)
@staff_member_required
def contract_settings(request):
context = {}
contract_settings = ContractSettings.get_solo()
fields = {}
for field in contract_settings._meta.fields:
if field.description == 'Text' and field.name != 'body':
fields[field.name] = {}
fields[field.name]['name'] = field.verbose_name
fields[field.name]['value'] = getattr(contract_settings,
field.name)
context['fields'] = fields
context['active_tab'] = 'contract'
return render(request, 'contract_settings.html', context)
@staff_member_required
def contract_settings_edit(request, pk=None):
return edit(
request,
ContractSettingsForm,
ContractSettings,
'contract_settings',
'contract_settings_edit.html',
pk=1,
active_nav='contract')
@staff_member_required
def estimate(request, pk=None):
context = {}
company = Company.get_solo()
if company:
context['company'] = company
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
estimate = get_object_or_404(Estimate, pk=pk)
document_id = str(estimate.document_id)
document_type = estimate._meta.verbose_name
document_type_upper = document_type.upper()
document_type_title = document_type.title()
context['active_nav'] = 'estimate'
context['document_type_upper'] = document_type_upper
context['document_type_title'] = document_type_title
context['edit_url'] = 'estimate_edit'
context['item'] = estimate
times_client = Time.objects.filter(
client=estimate.client,
estimate=None,
project=None,
invoiced=False,
invoice=None)
times_estimate = Time.objects.filter(estimate=estimate)
times = times_client | times_estimate
times = times.order_by('-date')
entries, subtotal, paid_amount, hours, amount = entries_total(times)
context['entries'] = entries
context['amount'] = amount
context['paid_amount'] = paid_amount
context['subtotal'] = subtotal
context['hours'] = hours
if pdf:
company_name = ''
if company.name:
company_name = company.name.replace('.', '_')
company_name = company_name.replace(', ', '_')
company_name = company_name.upper()
response = HttpResponse(content_type='application/pdf')
filename = '_'.join([document_type_upper, document_id, company_name])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
amount = request.GET.get('amount')
paid_amount = request.GET.get('paid_amount')
subtotal = request.GET.get('subtotal')
times = request.GET.get('times')
company = Company.get_solo()
kwargs, url_name = get_url_name(
'estimate', page_type='index_or_edit', pk=pk)
if times:
estimate = get_object_or_404(Estimate, pk=pk)
times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
for entry in times:
entry.estimate = estimate
entry.save()
return edit(
request,
EstimateForm,
Estimate,
url_name,
'estimate_edit.html',
active_nav='estimate',
amount=amount,
company=company,
kwargs=kwargs,
paid_amount=paid_amount,
pk=pk,
subtotal=subtotal)
@staff_member_required
def estimate_index(request):
company = Company.get_solo()
settings = Settings.get_solo()
search_fields = ('subject', )
context = index_items(
request,
Estimate,
search_fields,
active_nav='estimate',
app_settings=settings,
edit_url='estimate_edit', # Delete modal
order_by=('-issue_date', ),
show_search=True)
context['company'] = company
return render(request, 'estimate_index.html', context)
@staff_member_required
def estimate_mail(request, pk=None):
to = django_settings.EMAIL_FROM
estimate = get_object_or_404(Estimate, pk=pk)
notes = '<ul><li>'
counter = 0
hours = 0
rate = estimate.project.task.rate
start_date = estimate.project.start_date
end_date = estimate.project.end_date
for entry in estimate.time_set.all():
if counter != 0:
notes += '</li><li>%s <strong>%s hours</strong>.' % (entry.notes, entry.hours)
else:
notes += '%s <strong>%s hours</strong>.' % (entry.notes, entry.hours)
counter += 1
hours += entry.hours
notes += '</li></ul>'
cost = hours * rate
message = ''.join([
'<h1 style="text-align: center">Statement of Work</h1><h2>%s '
'total hours of %s @ $%s/hour for %s = $%.2f from %s to %s.</h2>' %
(hours, estimate.subject, rate, estimate.client.name, cost, start_date,
end_date), notes
])
if send_mail(request, 'Statement of Work for %s' % estimate.description, message, to):
messages.add_message(request, messages.SUCCESS, 'Mail sent!')
log = Log(entry='Estimate sent to %s.' % to)
log.save()
return HttpResponseRedirect(reverse('estimate', kwargs={'pk': pk}))
def home(request):
company = Company.get_solo()
settings = Settings.get_solo()
gross, net, invoices_active = dashboard_totals(Invoice)
context = {}
invoices = Invoice.objects.filter(
last_payment_date=None).order_by('amount')
notes = Note.objects.filter(active=True).order_by('-created', 'note',
'due_date', 'priority')
projects = Project.objects.filter(active=True)
plot_items = Report.objects.filter(active=True)
context['edit_url'] = 'project_edit' # Delete modal
context['company'] = company
context['dashboard_choices'] = get_setting(request, settings,
'dashboard_choices')
context['invoices'] = invoices
context['icon_size'] = get_setting(request, settings, 'icon_size')
context['gross'] = gross
context['net'] = net
context['notes'] = notes
context['nav_status'] = 'active'
context['projects'] = projects
context['settings'] = settings
context['plot_items'] = plot_items
return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
context = {}
company = Company.get_solo()
if company:
context['company'] = company
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
invoice = get_object_or_404(Invoice, pk=pk)
document_id = str(invoice.document_id)
document_type = invoice._meta.verbose_name
document_type_upper = document_type.upper()
document_type_title = document_type.title()
context['active_nav'] = 'invoice'
context['document_type_upper'] = document_type_upper
context['document_type_title'] = document_type_title
context['edit_url'] = 'invoice_edit' # Delete modal
context['item'] = invoice
times_project = Time.objects.filter(
invoiced=False, project=invoice.project, estimate=None, invoice=None)
times_invoice = Time.objects.filter(invoice=invoice)
times = times_project | times_invoice
times = times.order_by('-date')
entries, subtotal, paid_amount, hours, amount = entries_total(times)
last_payment_date = invoice.last_payment_date
context['amount'] = amount
context['entries'] = entries
context['hours'] = hours
context['invoice'] = True
context['last_payment_date'] = last_payment_date
context['paid_amount'] = paid_amount
context['subtotal'] = subtotal
if pdf:
response = HttpResponse(content_type='application/pdf')
if company.name:
company_name = company.name.replace('.', '_')
company_name = company_name.replace(', ', '_')
company_name = company_name.upper()
else:
company_name = 'COMPANY'
filename = '_'.join([document_type_upper, document_id, company_name])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, pk=None):
amount = request.GET.get('amount')
paid_amount = request.GET.get('paid_amount')
subtotal = request.GET.get('subtotal')
times = request.GET.get('times')
paid = request.GET.get('paid')
company = Company.get_solo()
project = request.GET.get('project')
url_name = 'invoice_index'
kwargs, url_name = get_url_name(
'invoice', page_type='index_or_edit', pk=pk)
invoice = get_object_or_404(Invoice, pk=pk)
project = get_object_or_404(Project, pk=project)
if invoice.project:
if invoice.project.client and not invoice.client:
invoice.client = invoice.project.client
invoice.save()
if paid and times:
times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
for entry in times:
entry.invoiced = True
entry.save()
elif times:
invoice = get_object_or_404(Invoice, pk=pk)
times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
for entry in times:
entry.invoice = invoice
entry.save()
return edit(
request,
InvoiceForm,
Invoice,
url_name,
'invoice_edit.html',
active_nav='invoice',
amount=amount,
company=company,
kwargs=kwargs,
paid_amount=paid_amount,
paid=paid,
pk=pk,
project=project,
subtotal=subtotal)
@staff_member_required
def invoice_index(request):
company = Company.get_solo()
settings = Settings.get_solo()
search_fields = (
'client__name',
'document_id',
'issue_date',
'project__name',
'subject', )
context = index_items(
request,
Invoice,
search_fields,
active_nav='invoice',
app_settings=settings,
edit_url='invoice_edit', # Delete modal
order_by=('-issue_date', ),
show_search=True)
context['company'] = company
return render(request, 'invoice_index.html', context)
def login(request):
context = {}
context['login'] = True
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
# https://stackoverflow.com/a/39316967/185820
auth_login(request, user)
return HttpResponseRedirect(reverse('home'))
else:
messages.add_message(request, messages.WARNING, 'Login failed.')
return HttpResponseRedirect(reverse('home'))
return render(request, 'login.html', context)
@staff_member_required
def log_index(request):
settings = Settings.get_solo()
search_fields = ('entry', )
context = index_items(
request,
Log,
search_fields,
order_by=('-created', ),
app_settings=settings)
return render(request, 'log_index.html', context)
@staff_member_required
def newsletter(request, pk=None):
"""
"""
context = {}
newsletter = get_object_or_404(Newsletter, pk=pk)
context['active_nav'] = 'newsletter'
context['contacts'] = newsletter.contacts.all().order_by('first_name')
context['edit_url'] = 'newsletter_edit'
context['item'] = newsletter
return render(request, 'newsletter.html', context)
@staff_member_required
def newsletter_edit(request, pk=None):
"""
"""
kwargs, url_name = get_url_name(
'newsletter', page_type='index_or_edit', pk=pk)
return edit(
request,
NewsletterForm,
Newsletter,
url_name,
'newsletter_edit.html',
active_nav='newsletter',
kwargs=kwargs,
pk=pk)
@staff_member_required
def newsletter_index(request, pk=None):
"""
"""
settings = Settings.get_solo()
search_fields = ('text', )
context = index_items(
request,
Newsletter,
search_fields,
active_nav='newsletter',
app_settings=settings,
order_by=('-created', ))
return render(request, 'newsletter_index.html', context)
@staff_member_required
def newsletter_send(request, pk=None):
"""
"""
context = {}
newsletter = get_object_or_404(Newsletter, pk=pk)
contacts = newsletter.contacts.all().order_by('first_name')
for contact in contacts:
url = reverse('contact_unsubscribe', kwargs={'pk': contact.pk})
url = ''.join([request.get_host(), url])
to = contact.email
first_name = contact.first_name
subject = newsletter.subject
message = newsletter.text
if send_mail(
request,
subject,
message,
to,
url=url,
uuid=contact.uuid,
first_name=first_name):
log = Log(entry='Mail sent to %s.' % to)
log.save()
messages.add_message(request, messages.SUCCESS, 'Batch mail sent!')
context['active_nav'] = 'newsletter'
context['contacts'] = contacts
context['edit_url'] = 'newsletter_edit'
context['item'] = newsletter
return render(request, 'newsletter.html', context)
@staff_member_required
def note(request, pk=None):
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
note = get_object_or_404(Note, pk=pk)
notes = Note.objects.filter(note=note)
notes = notes.order_by('-pk')
context['active_nav'] = 'note'
context['edit_url'] = 'note_edit'
context['item'] = note
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=note-%s.pdf' % pk
return generate_pdf(
'pdf_note.html', context=context, file_object=response)
else:
return render(request, 'note.html', context)
@staff_member_required
def note_edit(request, pk=None):
company = Company.get_solo()
kwargs, url_name = get_url_name('note', page_type='index_or_edit', pk=pk)
return edit(
request,
NoteForm,
Note,
url_name,
'note_edit.html',
active_nav='note',
company=company,
kwargs=kwargs,
pk=pk)
@staff_member_required
def note_index(request, pk=None):
settings = Settings.get_solo()
search_fields = ('note', )
filters = {'hidden': False, }
context = index_items(
request,
Note,
search_fields,
active_nav='note',
app_settings=settings,
filters=filters,
order_by=('-active', '-created', 'note', 'due_date', 'priority'),
show_search=True)
context['edit_url'] = 'note_edit' # Delete modal
return render(request, 'note_index.html', context)
@staff_member_required
def project(request, pk=None):
settings = Settings.get_solo()
context = {}
project = get_object_or_404(Project, pk=pk)
times = Time.objects.filter(
project=project, invoiced=False).order_by('-date')
estimates = Estimate.objects.filter(project=project)
invoices = Invoice.objects.filter(project=project)
context['active_nav'] = 'project'
context['company'] = Company.get_solo()
context['edit_url'] = 'project_edit' # Delete modal
context['icon_size'] = get_setting(request, settings, 'icon_size')
context['estimates'] = estimates
context['invoices'] = invoices
context['item'] = project
context['times'] = times
return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
# client = request.GET.get('client')
# client = get_object_or_404(Client, pk=client)
# clients = Client.objects.filter(active=True)
kwargs, url_name = get_url_name(
'project', page_type='index_or_edit', pk=pk)
return edit(
request,
ProjectForm,
Project,
url_name,
'project_edit.html',
active_nav='project',
# client=client,
# clients=clients,
kwargs=kwargs,
pk=pk)
@staff_member_required
def project_index(request, pk=None):
settings = Settings.get_solo()
search_fields = ('id', 'name')
context = index_items(
request,
Project,
search_fields,
active_nav='project',
app_settings=settings,
edit_url='project_edit', # Delete modal
order_by=('-active', ),
show_search=True)
return render(request, 'project_index.html', context)
@staff_member_required
def proposal(request, pk=None):
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
proposal = get_object_or_404(Proposal, pk=pk)
context['active_nav'] = 'proposal'
context['edit_url'] = 'proposal_edit'
context['item'] = proposal
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=proposal-%s.pdf' % pk
return generate_pdf(
'pdf_proposal.html', context=context, file_object=response)
else:
return render(request, 'proposal.html', context)
def proposal_edit(request, pk=None):
"""
"""
company = Company.get_solo()
kwargs, url_name = get_url_name(
'proposal', page_type='index_or_edit', pk=pk)
return edit(
request,
ProposalForm,
Proposal,
url_name,
'proposal_edit.html',
active_nav='proposal',
company=company,
kwargs=kwargs,
pk=pk)
@staff_member_required
def proposal_index(request, pk=None):
settings = Settings.get_solo()
search_fields = ()
context = index_items(
request,
Proposal,
search_fields,
active_nav='proposal',
app_settings=settings,
show_search=True)
context['edit_url'] = 'proposal_edit' # Delete modal
return render(request, 'proposal_index.html', context)
@staff_member_required
def report(request, pk=None):
company = Company.get_solo()
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
report = get_object_or_404(Report, pk=pk)
reports = Report.objects.filter(active=True)
reports = reports.aggregate(gross=Sum(F('gross')), net=Sum(F('net')))
context['active_nav'] = 'report'
context['company'] = company
context['cost'] = report.gross - report.net
context['edit_url'] = 'report_edit' # Delete modal
context['item'] = report
context['reports'] = reports
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=report-%s.pdf' % pk
return generate_pdf(
'pdf_report.html', context=context, file_object=response)
else:
return render(request, 'report.html', context)
@staff_member_required
def report_edit(request, pk=None):
gross, net, invoices_active = dashboard_totals(Invoice)
kwargs, url_name = get_url_name('report', page_type='index_or_edit', pk=pk)
return edit(
request,
ReportForm,
Report,
url_name,
'report_edit.html',
active_nav='report',
gross=gross,
invoices_active=invoices_active,
kwargs=kwargs,
net=net,
pk=pk)
@staff_member_required
def report_index(request):
settings = Settings.get_solo()
show_plot = False
reports = Report.objects.filter(active=True)
plot_items = reports # Save for plotting
reports = reports.aggregate(gross=Sum(F('gross')), net=Sum(F('net')))
company = Company.get_solo()
search_fields = ('id', 'name', 'gross', 'net')
context = index_items(
request,
Report,
search_fields,
active_nav='report',
app_settings=settings,
edit_url='report_edit', # Delete modal
order_by=('-date', ),
show_search=True)
if reports['gross'] is not None and reports['net'] is not None:
cost = reports['gross'] - reports['net']
else:
reports['gross'] = 0
reports['net'] = 0
cost = 0
if 'items' in context:
if len(context['items']) > 1:
show_plot = True
context['reports'] = reports
context['company'] = company
context['cost'] = cost
context['show_plot'] = show_plot
context['plot_items'] = plot_items
return render(request, 'report_index.html', context)
def report_plot(request): # http://stackoverflow.com/a/5515994/185820
"""
"""
values = get_query(request, 'values')
# http://matplotlib.org/examples/api/date_demo.html
x = [date2num(datetime.strptime(i[1], '%Y-%m-%d')) for i in values]
y = [i[0] for i in values]
figure = Figure()
canvas = FigureCanvasAgg(figure)
axes = figure.add_subplot(1, 1, 1)
axes.grid(True)
axes.plot(x, y)
axes.xaxis.set_major_locator(MonthLocator())
axes.xaxis.set_major_formatter(DateFormatter('%m'))
# write image data to a string buffer and get the PNG image bytes
buf = BytesIO()
canvas.print_png(buf)
data = buf.getvalue()
# write image bytes back to the browser
return HttpResponse(data, content_type="image/png")
@staff_member_required
def settings(request):
context = {}
settings = Settings.get_solo()
context['settings'] = settings
context['active_tab'] = 'system'
return render(request, 'settings.html', context)
@staff_member_required
def settings_edit(request, pk=None):
return edit(
request,
SettingsForm,
Settings,
'settings',
'settings_edit.html',
pk=1)
@staff_member_required
def task(request, pk=None):
context = {}
task = get_object_or_404(Task, pk=pk)
context['active_nav'] = 'task'
context['edit_url'] = 'task_edit' # Delete modal
context['item'] = task
return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
kwargs, url_name = get_url_name('task', page_type='index_or_edit', pk=pk)
return edit(
request,
TaskForm,
Task,
url_name,
'task_edit.html',
active_nav='task',
pk=pk,
kwargs=kwargs)
@staff_member_required
def task_index(request):
settings = Settings.get_solo()
search_fields = ('name', )
context = index_items(
request,
Task,
search_fields,
active_nav='task',
app_settings=settings,
edit_url='task_edit', # Delete modal
order_by=('-active', ),
show_search=True)
return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
context = {}
entry = get_object_or_404(Time, pk=pk)
if not entry.user and not request.user.is_staff:
return HttpResponseRedirect(reverse('admin:index'))
if entry.user:
if (not entry.user.username == request.user.username and
not request.user.is_staff):
return HttpResponseRedirect(reverse('admin:index'))
context['active_nav'] = 'time'
context['edit_url'] = 'entry_edit' # Delete modal
context['item'] = entry
return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
client = request.GET.get('client')
project = request.GET.get('project')
task = None
kwargs, url_name = get_url_name('time', page_type='index_or_edit', pk=pk)
if pk is not None:
entry = get_object_or_404(Time, pk=pk)
if entry.user:
if (entry.user.username != request.user.username and
not request.user.is_staff):
return HttpResponseRedirect(reverse('admin:index'))
else:
if not request.user.is_staff:
return HttpResponseRedirect(reverse('admin:index'))
if client:
client = get_object_or_404(Client, pk=client)
if project:
project = get_object_or_404(Project, pk=project)
if project.task:
task = get_object_or_404(Task, pk=project.task.pk)
projects = Project.objects.filter(team=request.user.pk)
clients = Client.objects.filter(
pk__in=[i.client.pk for i in projects if i.client])
tasks = Task.objects.filter(pk__in=[i.task.pk for i in projects if i.task])
if request.user.is_staff:
from .forms import TimeAdminForm as TimeForm
else:
from .forms import TimeForm
return edit(
request,
TimeForm,
Time,
url_name,
'time_edit.html',
active_nav='time',
client=client,
clients=clients,
pk=pk,
project=project,
projects=projects,
task=task,
tasks=tasks,
kwargs=kwargs)
@login_required
def time_index(request):
search_fields = ('client__name', 'date', 'notes', 'pk', 'project__name',
'invoice__document_id', 'user__username')
settings = Settings.get_solo()
context = index_items(
request,
Time,
search_fields,
active_nav='time',
app_settings=settings,
edit_url='entry_edit', # Delete modal
page_size=3,
order_by=('-date', ),
show_search=True)
if not request.user.is_staff:
return HttpResponseRedirect(reverse('admin:index'))
else:
return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
company = Company.get_solo()
contacts = Contact.objects.all()
settings = Settings.get_solo()
user = get_object_or_404(User, pk=pk)
profile = Profile.objects.get_or_create(user=user)[0]
filters = {
'estimate': None,
'user': user,
}
search_fields = ()
context = index_items(
request,
Time,
search_fields,
order_by=('-date', ),
filters=filters,
app_settings=settings)
total_hours = context['total_hours']
if profile.rate and total_hours:
total_dollars = profile.rate * total_hours
else:
total_dollars = 0
context['active_nav'] = 'user'
context['company'] = company
context['edit_url'] = 'user_edit' # Delete modal
context['icon_size'] = get_setting(request, settings, 'icon_size')
context['item'] = user
context['profile'] = profile
context['request'] = request
context['total_dollars'] = '%.2f' % total_dollars
context['is_contact'] = user.email in [i.email for i in contacts]
# XXX One off to list projects, maybe refactor index_items to return
# multiple listings e.g.
# projects = index_items()
# times = index_items()
# context['projects'] = projects
# context['times'] = times
projects = Project.objects.filter(team__in=[user, ]).order_by('-updated')
context['projects'] = projects
if request.user.pk == int(pk) or request.user.is_staff:
return render(request, 'user.html', context)
else:
return HttpResponseRedirect(reverse('home'))
@staff_member_required
def user_contact(request, pk=None):
return add_user_to_contacts(request, Contact, pk=pk)
@login_required
def user_edit(request, pk=None):
context = {}
kwargs, url_name = get_url_name('user', page_type='index_or_edit', pk=pk)
return edit(
request,
ProfileForm,
Profile,
url_name,
'user_edit.html',
active_nav='user',
context=context,
kwargs=kwargs,
pk=pk)
@staff_member_required
def user_index(request):
company = Company.get_solo()
settings = Settings.get_solo()
# XXX FieldError at /user
# Cannot resolve keyword 'updated' into field.
# search_fields = ('first_name', 'last_name', 'email')
search_fields = ()
context = index_items(
request,
User,
search_fields,
active_nav='user',
app_settings=settings,
order_by=('-profile__active', ),
show_search=False)
context['company'] = company
# Check if user is contact
contacts = Contact.objects.all()
items = context['items']
for item in items:
if item.email in [i.email for i in contacts]:
item.is_contact = True
else:
item.is_contact = False
context['items'] = items
return render(request, 'user_index.html', context)
Update
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import ContractForm
from .forms import ContractSettingsForm
from .forms import EstimateForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NewsletterForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ProposalForm
from .forms import ReportForm
from .forms import SettingsForm
from .forms import TaskForm
from .models import Client
from .models import Company
from .models import Contact
from .models import Contract
from .models import ContractSettings
from .models import Estimate
from .models import Invoice
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import Settings
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import add_user_to_contacts
from .utils import index_items
from .utils import dashboard_totals
from .utils import edit
from .utils import entries_total
from .utils import generate_doc
from .utils import get_filename
from .utils import get_setting
from .utils import get_query
from .utils import get_url_name
from .utils import send_mail
from datetime import datetime
from django.conf import settings as django_settings
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import F, Sum
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from faker import Faker
from io import BytesIO
from matplotlib.dates import DateFormatter
from matplotlib.dates import MonthLocator
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Client.objects.filter(published=True).order_by('name')
serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Service.objects.filter(active=True).order_by('name')
serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
"""
"""
queryset = Profile.objects.filter(
published=True).order_by('user__first_name')
serializer_class = ProfileSerializer
@staff_member_required
def client(request, pk=None):
context = {}
settings = Settings.get_solo()
client = get_object_or_404(Client, pk=pk)
contacts = Contact.objects.filter(client=client)
contacts = contacts.order_by('-pk')
contracts = Contract.objects.filter(client=client)
contracts = contracts.order_by('-updated')
projects = Project.objects.filter(client=client)
projects = projects.order_by('-start_date')
context['active_nav'] = 'client'
context['edit_url'] = 'client_edit'
context['icon_size'] = get_setting(request, settings, 'icon_size')
context['item'] = client
context['contacts'] = contacts
context['contracts'] = contracts
context['projects'] = projects
return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
kwargs, url_name = get_url_name('client', page_type='index_or_edit', pk=pk)
return edit(
request,
ClientForm,
Client,
url_name,
'client_edit.html',
active_nav='client',
kwargs=kwargs,
pk=pk)
@staff_member_required
def client_index(request):
search_fields = ('address', 'name')
settings = Settings.get_solo()
context = index_items(
request,
Client,
search_fields,
active_nav='client',
app_settings=settings,
edit_url='client_edit', # Delete modal
order_by=('-active', 'name'),
show_search=True)
return render(request, 'client_index.html', context)
@staff_member_required
def company_edit(request, pk=None):
return edit(
request, CompanyForm, Company, 'company', 'company_edit.html', pk=1)
@staff_member_required
def company(request):
context = {}
company = Company.get_solo()
context['company'] = company
context['active_tab'] = 'company'
return render(request, 'company.html', context)
@staff_member_required
def contact(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
context['active_nav'] = 'contact'
context['edit_url'] = 'contact_edit' # Delete modal
context['item'] = contact
return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
kwargs, url_name = get_url_name(
'contact', page_type='index_or_edit', pk=pk)
return edit(
request,
ContactForm,
Contact,
url_name,
'contact_edit.html',
active_nav='contact',
client=client,
kwargs=kwargs,
pk=pk)
@staff_member_required
def contact_index(request):
settings = Settings.get_solo()
search_fields = ('first_name', 'last_name', 'email', 'notes')
context = index_items(
request,
Contact,
search_fields,
active_nav='contact',
app_settings=settings,
edit_url='contact_edit', # Delete modal
order_by=('-active', 'first_name'),
show_search=True)
return render(request, 'contact_index.html', context)
@staff_member_required
def contact_mail(request, pk=None):
context = {}
contact = get_object_or_404(Contact, pk=pk)
if request.method == 'POST':
form = MailForm(request.POST)
if form.is_valid():
test = form.cleaned_data['test']
if test:
fake = Faker()
subject = fake.text()
message = fake.text()
else:
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
url = reverse('contact_unsubscribe', kwargs={'pk': pk})
url = ''.join([request.get_host(), url])
to = contact.email
first_name = contact.first_name
if send_mail(
request,
subject,
message,
to,
url=url,
uuid=contact.uuid,
first_name=first_name):
messages.add_message(request, messages.SUCCESS, 'Mail sent!')
log = Log(entry='Mail sent to %s.' % to)
log.save()
return HttpResponseRedirect(reverse('contact', kwargs={'pk': pk}))
else:
form = MailForm()
context['active_nav'] = 'contact'
context['contact'] = contact
context['form'] = form
return render(request, 'contact_mail.html', context)
def contact_unsubscribe(request, pk=None):
contact = get_object_or_404(Contact, pk=pk)
uuid = request.GET.get('id')
if uuid == contact.uuid:
contact.subscribed = False
contact.save()
messages.add_message(request, messages.SUCCESS,
'You have been unsubscribed!')
log = Log(entry='%s unsubscribed.' % contact.email)
log.save()
return HttpResponseRedirect(reverse('home'))
else:
messages.add_message(request, messages.WARNING, 'Nothing to see here.')
return HttpResponseRedirect(reverse('home'))
@staff_member_required
def contract(request, pk=None):
"""
"""
doc = get_query(request, 'doc')
pdf = get_query(request, 'pdf')
company = Company.get_solo()
context = {}
contract = get_object_or_404(Contract, pk=pk)
context['active_nav'] = 'contract'
context['company'] = company
context['edit_url'] = 'contract_edit'
context['item'] = contract
context['pdf'] = pdf
# XXX In hindsight, this[1] is terrible. Maybe some OneToOne fields
# could clean this up.
# [1] i.e. The current implementation of time entry association with
# estimates & invoices for the purpose of "populating" those
# documents with line items.
estimate = contract.statement_of_work
if estimate:
times_client = Time.objects.filter(
client=estimate.client,
estimate=None,
project=None,
invoiced=False,
invoice=None)
times_estimate = Time.objects.filter(estimate=estimate)
times = times_client | times_estimate
times = times.order_by('-date')
else:
times = None
context['times'] = times
if pdf:
response = HttpResponse(content_type='application/pdf')
filename = get_filename(company)
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_contract.html', context=context, file_object=response)
if doc:
# https://stackoverflow.com/a/24122313/185820
document = generate_doc(contract)
filename = get_filename(company)
f = BytesIO()
document.save(f)
length = f.tell()
f.seek(0)
content_type = 'application/vnd.openxmlformats-'
content_type += 'officedocument.wordprocessingml.document'
response = HttpResponse(f.getvalue(), content_type=content_type)
response['Content-Disposition'] = 'filename=%s.docx' % filename
response['Content-Length'] = length
return response
else:
return render(request, 'contract.html', context)
@staff_member_required
def contract_edit(request, pk=None):
"""
"""
contract_settings = ContractSettings.get_solo()
kwargs, url_name = get_url_name(
'contract', page_type='index_or_edit', pk=pk)
return edit(
request,
ContractForm,
Contract,
url_name,
'contract_edit.html',
active_nav='contract',
contract_settings=contract_settings,
kwargs=kwargs,
pk=pk)
@staff_member_required
def contract_index(request):
"""
"""
settings = Settings.get_solo()
search_fields = ()
context = index_items(
request,
Contract,
search_fields,
active_nav='contract',
app_settings=settings,
order_by=('-created', ))
return render(request, 'contract_index.html', context)
@staff_member_required
def contract_settings(request):
context = {}
contract_settings = ContractSettings.get_solo()
fields = {}
for field in contract_settings._meta.fields:
if field.description == 'Text' and field.name != 'body':
fields[field.name] = {}
fields[field.name]['name'] = field.verbose_name
fields[field.name]['value'] = getattr(contract_settings,
field.name)
context['fields'] = fields
context['active_tab'] = 'contract'
return render(request, 'contract_settings.html', context)
@staff_member_required
def contract_settings_edit(request, pk=None):
return edit(
request,
ContractSettingsForm,
ContractSettings,
'contract_settings',
'contract_settings_edit.html',
pk=1,
active_nav='contract')
@staff_member_required
def estimate(request, pk=None):
context = {}
company = Company.get_solo()
if company:
context['company'] = company
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
estimate = get_object_or_404(Estimate, pk=pk)
document_id = str(estimate.document_id)
document_type = estimate._meta.verbose_name
document_type_upper = document_type.upper()
document_type_title = document_type.title()
context['active_nav'] = 'estimate'
context['document_type_upper'] = document_type_upper
context['document_type_title'] = document_type_title
context['edit_url'] = 'estimate_edit'
context['item'] = estimate
times_client = Time.objects.filter(
client=estimate.client,
estimate=None,
project=None,
invoiced=False,
invoice=None)
times_estimate = Time.objects.filter(estimate=estimate)
times = times_client | times_estimate
times = times.order_by('-date')
entries, subtotal, paid_amount, hours, amount = entries_total(times)
context['entries'] = entries
context['amount'] = amount
context['paid_amount'] = paid_amount
context['subtotal'] = subtotal
context['hours'] = hours
if pdf:
company_name = ''
if company.name:
company_name = company.name.replace('.', '_')
company_name = company_name.replace(', ', '_')
company_name = company_name.upper()
response = HttpResponse(content_type='application/pdf')
filename = '_'.join([document_type_upper, document_id, company_name])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
amount = request.GET.get('amount')
paid_amount = request.GET.get('paid_amount')
subtotal = request.GET.get('subtotal')
times = request.GET.get('times')
company = Company.get_solo()
kwargs, url_name = get_url_name(
'estimate', page_type='index_or_edit', pk=pk)
if times:
estimate = get_object_or_404(Estimate, pk=pk)
times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
for entry in times:
entry.estimate = estimate
entry.save()
return edit(
request,
EstimateForm,
Estimate,
url_name,
'estimate_edit.html',
active_nav='estimate',
amount=amount,
company=company,
kwargs=kwargs,
paid_amount=paid_amount,
pk=pk,
subtotal=subtotal)
@staff_member_required
def estimate_index(request):
company = Company.get_solo()
settings = Settings.get_solo()
search_fields = ('subject', )
context = index_items(
request,
Estimate,
search_fields,
active_nav='estimate',
app_settings=settings,
edit_url='estimate_edit', # Delete modal
order_by=('-issue_date', ),
show_search=True)
context['company'] = company
return render(request, 'estimate_index.html', context)
@staff_member_required
def estimate_mail(request, pk=None):
to = django_settings.EMAIL_FROM
estimate = get_object_or_404(Estimate, pk=pk)
notes = '<ul><li>'
counter = 0
hours = 0
rate = estimate.project.task.rate
start_date = estimate.project.start_date
end_date = estimate.project.end_date
for entry in estimate.time_set.all():
if counter != 0:
notes += '</li><li>%s <strong>%s hours</strong>.' % (entry.notes, entry.hours)
else:
notes += '%s <strong>%s hours</strong>.' % (entry.notes, entry.hours)
counter += 1
hours += entry.hours
notes += '</li></ul>'
cost = hours * rate
message = ''.join([
'<h1 style="text-align: center">Statement of Work</h1><h2>%s '
'total hours of %s @ $%s/hour for %s = $%.2f from %s to %s.</h2>' %
(hours, estimate.subject, rate, estimate.client.name, cost, start_date,
end_date), notes
])
if send_mail(request, 'Statement of Work for %s' % estimate.subject, message, to):
messages.add_message(request, messages.SUCCESS, 'Mail sent!')
log = Log(entry='Estimate sent to %s.' % to)
log.save()
return HttpResponseRedirect(reverse('estimate', kwargs={'pk': pk}))
def home(request):
company = Company.get_solo()
settings = Settings.get_solo()
gross, net, invoices_active = dashboard_totals(Invoice)
context = {}
invoices = Invoice.objects.filter(
last_payment_date=None).order_by('amount')
notes = Note.objects.filter(active=True).order_by('-created', 'note',
'due_date', 'priority')
projects = Project.objects.filter(active=True)
plot_items = Report.objects.filter(active=True)
context['edit_url'] = 'project_edit' # Delete modal
context['company'] = company
context['dashboard_choices'] = get_setting(request, settings,
'dashboard_choices')
context['invoices'] = invoices
context['icon_size'] = get_setting(request, settings, 'icon_size')
context['gross'] = gross
context['net'] = net
context['notes'] = notes
context['nav_status'] = 'active'
context['projects'] = projects
context['settings'] = settings
context['plot_items'] = plot_items
return render(request, 'home.html', context)
@staff_member_required
def invoice(request, pk=None):
context = {}
company = Company.get_solo()
if company:
context['company'] = company
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
invoice = get_object_or_404(Invoice, pk=pk)
document_id = str(invoice.document_id)
document_type = invoice._meta.verbose_name
document_type_upper = document_type.upper()
document_type_title = document_type.title()
context['active_nav'] = 'invoice'
context['document_type_upper'] = document_type_upper
context['document_type_title'] = document_type_title
context['edit_url'] = 'invoice_edit' # Delete modal
context['item'] = invoice
times_project = Time.objects.filter(
invoiced=False, project=invoice.project, estimate=None, invoice=None)
times_invoice = Time.objects.filter(invoice=invoice)
times = times_project | times_invoice
times = times.order_by('-date')
entries, subtotal, paid_amount, hours, amount = entries_total(times)
last_payment_date = invoice.last_payment_date
context['amount'] = amount
context['entries'] = entries
context['hours'] = hours
context['invoice'] = True
context['last_payment_date'] = last_payment_date
context['paid_amount'] = paid_amount
context['subtotal'] = subtotal
if pdf:
response = HttpResponse(content_type='application/pdf')
if company.name:
company_name = company.name.replace('.', '_')
company_name = company_name.replace(', ', '_')
company_name = company_name.upper()
else:
company_name = 'COMPANY'
filename = '_'.join([document_type_upper, document_id, company_name])
response['Content-Disposition'] = 'filename=%s.pdf' % filename
return generate_pdf(
'pdf_invoice.html', context=context, file_object=response)
else:
return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, pk=None):
amount = request.GET.get('amount')
paid_amount = request.GET.get('paid_amount')
subtotal = request.GET.get('subtotal')
times = request.GET.get('times')
paid = request.GET.get('paid')
company = Company.get_solo()
project = request.GET.get('project')
url_name = 'invoice_index'
kwargs, url_name = get_url_name(
'invoice', page_type='index_or_edit', pk=pk)
invoice = get_object_or_404(Invoice, pk=pk)
project = get_object_or_404(Project, pk=project)
if invoice.project:
if invoice.project.client and not invoice.client:
invoice.client = invoice.project.client
invoice.save()
if paid and times:
times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
for entry in times:
entry.invoiced = True
entry.save()
elif times:
invoice = get_object_or_404(Invoice, pk=pk)
times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
for entry in times:
entry.invoice = invoice
entry.save()
return edit(
request,
InvoiceForm,
Invoice,
url_name,
'invoice_edit.html',
active_nav='invoice',
amount=amount,
company=company,
kwargs=kwargs,
paid_amount=paid_amount,
paid=paid,
pk=pk,
project=project,
subtotal=subtotal)
@staff_member_required
def invoice_index(request):
company = Company.get_solo()
settings = Settings.get_solo()
search_fields = (
'client__name',
'document_id',
'issue_date',
'project__name',
'subject', )
context = index_items(
request,
Invoice,
search_fields,
active_nav='invoice',
app_settings=settings,
edit_url='invoice_edit', # Delete modal
order_by=('-issue_date', ),
show_search=True)
context['company'] = company
return render(request, 'invoice_index.html', context)
def login(request):
context = {}
context['login'] = True
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
# https://stackoverflow.com/a/39316967/185820
auth_login(request, user)
return HttpResponseRedirect(reverse('home'))
else:
messages.add_message(request, messages.WARNING, 'Login failed.')
return HttpResponseRedirect(reverse('home'))
return render(request, 'login.html', context)
@staff_member_required
def log_index(request):
settings = Settings.get_solo()
search_fields = ('entry', )
context = index_items(
request,
Log,
search_fields,
order_by=('-created', ),
app_settings=settings)
return render(request, 'log_index.html', context)
@staff_member_required
def newsletter(request, pk=None):
"""
"""
context = {}
newsletter = get_object_or_404(Newsletter, pk=pk)
context['active_nav'] = 'newsletter'
context['contacts'] = newsletter.contacts.all().order_by('first_name')
context['edit_url'] = 'newsletter_edit'
context['item'] = newsletter
return render(request, 'newsletter.html', context)
@staff_member_required
def newsletter_edit(request, pk=None):
"""
"""
kwargs, url_name = get_url_name(
'newsletter', page_type='index_or_edit', pk=pk)
return edit(
request,
NewsletterForm,
Newsletter,
url_name,
'newsletter_edit.html',
active_nav='newsletter',
kwargs=kwargs,
pk=pk)
@staff_member_required
def newsletter_index(request, pk=None):
"""
"""
settings = Settings.get_solo()
search_fields = ('text', )
context = index_items(
request,
Newsletter,
search_fields,
active_nav='newsletter',
app_settings=settings,
order_by=('-created', ))
return render(request, 'newsletter_index.html', context)
@staff_member_required
def newsletter_send(request, pk=None):
"""
"""
context = {}
newsletter = get_object_or_404(Newsletter, pk=pk)
contacts = newsletter.contacts.all().order_by('first_name')
for contact in contacts:
url = reverse('contact_unsubscribe', kwargs={'pk': contact.pk})
url = ''.join([request.get_host(), url])
to = contact.email
first_name = contact.first_name
subject = newsletter.subject
message = newsletter.text
if send_mail(
request,
subject,
message,
to,
url=url,
uuid=contact.uuid,
first_name=first_name):
log = Log(entry='Mail sent to %s.' % to)
log.save()
messages.add_message(request, messages.SUCCESS, 'Batch mail sent!')
context['active_nav'] = 'newsletter'
context['contacts'] = contacts
context['edit_url'] = 'newsletter_edit'
context['item'] = newsletter
return render(request, 'newsletter.html', context)
@staff_member_required
def note(request, pk=None):
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
note = get_object_or_404(Note, pk=pk)
notes = Note.objects.filter(note=note)
notes = notes.order_by('-pk')
context['active_nav'] = 'note'
context['edit_url'] = 'note_edit'
context['item'] = note
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=note-%s.pdf' % pk
return generate_pdf(
'pdf_note.html', context=context, file_object=response)
else:
return render(request, 'note.html', context)
@staff_member_required
def note_edit(request, pk=None):
company = Company.get_solo()
kwargs, url_name = get_url_name('note', page_type='index_or_edit', pk=pk)
return edit(
request,
NoteForm,
Note,
url_name,
'note_edit.html',
active_nav='note',
company=company,
kwargs=kwargs,
pk=pk)
@staff_member_required
def note_index(request, pk=None):
settings = Settings.get_solo()
search_fields = ('note', )
filters = {'hidden': False, }
context = index_items(
request,
Note,
search_fields,
active_nav='note',
app_settings=settings,
filters=filters,
order_by=('-active', '-created', 'note', 'due_date', 'priority'),
show_search=True)
context['edit_url'] = 'note_edit' # Delete modal
return render(request, 'note_index.html', context)
@staff_member_required
def project(request, pk=None):
settings = Settings.get_solo()
context = {}
project = get_object_or_404(Project, pk=pk)
times = Time.objects.filter(
project=project, invoiced=False).order_by('-date')
estimates = Estimate.objects.filter(project=project)
invoices = Invoice.objects.filter(project=project)
context['active_nav'] = 'project'
context['company'] = Company.get_solo()
context['edit_url'] = 'project_edit' # Delete modal
context['icon_size'] = get_setting(request, settings, 'icon_size')
context['estimates'] = estimates
context['invoices'] = invoices
context['item'] = project
context['times'] = times
return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
# client = request.GET.get('client')
# client = get_object_or_404(Client, pk=client)
# clients = Client.objects.filter(active=True)
kwargs, url_name = get_url_name(
'project', page_type='index_or_edit', pk=pk)
return edit(
request,
ProjectForm,
Project,
url_name,
'project_edit.html',
active_nav='project',
# client=client,
# clients=clients,
kwargs=kwargs,
pk=pk)
@staff_member_required
def project_index(request, pk=None):
settings = Settings.get_solo()
search_fields = ('id', 'name')
context = index_items(
request,
Project,
search_fields,
active_nav='project',
app_settings=settings,
edit_url='project_edit', # Delete modal
order_by=('-active', ),
show_search=True)
return render(request, 'project_index.html', context)
@staff_member_required
def proposal(request, pk=None):
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
proposal = get_object_or_404(Proposal, pk=pk)
context['active_nav'] = 'proposal'
context['edit_url'] = 'proposal_edit'
context['item'] = proposal
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=proposal-%s.pdf' % pk
return generate_pdf(
'pdf_proposal.html', context=context, file_object=response)
else:
return render(request, 'proposal.html', context)
def proposal_edit(request, pk=None):
"""
"""
company = Company.get_solo()
kwargs, url_name = get_url_name(
'proposal', page_type='index_or_edit', pk=pk)
return edit(
request,
ProposalForm,
Proposal,
url_name,
'proposal_edit.html',
active_nav='proposal',
company=company,
kwargs=kwargs,
pk=pk)
@staff_member_required
def proposal_index(request, pk=None):
settings = Settings.get_solo()
search_fields = ()
context = index_items(
request,
Proposal,
search_fields,
active_nav='proposal',
app_settings=settings,
show_search=True)
context['edit_url'] = 'proposal_edit' # Delete modal
return render(request, 'proposal_index.html', context)
@staff_member_required
def report(request, pk=None):
company = Company.get_solo()
context = {}
pdf = get_query(request, 'pdf')
context['pdf'] = pdf
report = get_object_or_404(Report, pk=pk)
reports = Report.objects.filter(active=True)
reports = reports.aggregate(gross=Sum(F('gross')), net=Sum(F('net')))
context['active_nav'] = 'report'
context['company'] = company
context['cost'] = report.gross - report.net
context['edit_url'] = 'report_edit' # Delete modal
context['item'] = report
context['reports'] = reports
if pdf:
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename=report-%s.pdf' % pk
return generate_pdf(
'pdf_report.html', context=context, file_object=response)
else:
return render(request, 'report.html', context)
@staff_member_required
def report_edit(request, pk=None):
gross, net, invoices_active = dashboard_totals(Invoice)
kwargs, url_name = get_url_name('report', page_type='index_or_edit', pk=pk)
return edit(
request,
ReportForm,
Report,
url_name,
'report_edit.html',
active_nav='report',
gross=gross,
invoices_active=invoices_active,
kwargs=kwargs,
net=net,
pk=pk)
@staff_member_required
def report_index(request):
settings = Settings.get_solo()
show_plot = False
reports = Report.objects.filter(active=True)
plot_items = reports # Save for plotting
reports = reports.aggregate(gross=Sum(F('gross')), net=Sum(F('net')))
company = Company.get_solo()
search_fields = ('id', 'name', 'gross', 'net')
context = index_items(
request,
Report,
search_fields,
active_nav='report',
app_settings=settings,
edit_url='report_edit', # Delete modal
order_by=('-date', ),
show_search=True)
if reports['gross'] is not None and reports['net'] is not None:
cost = reports['gross'] - reports['net']
else:
reports['gross'] = 0
reports['net'] = 0
cost = 0
if 'items' in context:
if len(context['items']) > 1:
show_plot = True
context['reports'] = reports
context['company'] = company
context['cost'] = cost
context['show_plot'] = show_plot
context['plot_items'] = plot_items
return render(request, 'report_index.html', context)
def report_plot(request): # http://stackoverflow.com/a/5515994/185820
"""
"""
values = get_query(request, 'values')
# http://matplotlib.org/examples/api/date_demo.html
x = [date2num(datetime.strptime(i[1], '%Y-%m-%d')) for i in values]
y = [i[0] for i in values]
figure = Figure()
canvas = FigureCanvasAgg(figure)
axes = figure.add_subplot(1, 1, 1)
axes.grid(True)
axes.plot(x, y)
axes.xaxis.set_major_locator(MonthLocator())
axes.xaxis.set_major_formatter(DateFormatter('%m'))
# write image data to a string buffer and get the PNG image bytes
buf = BytesIO()
canvas.print_png(buf)
data = buf.getvalue()
# write image bytes back to the browser
return HttpResponse(data, content_type="image/png")
@staff_member_required
def settings(request):
context = {}
settings = Settings.get_solo()
context['settings'] = settings
context['active_tab'] = 'system'
return render(request, 'settings.html', context)
@staff_member_required
def settings_edit(request, pk=None):
return edit(
request,
SettingsForm,
Settings,
'settings',
'settings_edit.html',
pk=1)
@staff_member_required
def task(request, pk=None):
context = {}
task = get_object_or_404(Task, pk=pk)
context['active_nav'] = 'task'
context['edit_url'] = 'task_edit' # Delete modal
context['item'] = task
return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
kwargs, url_name = get_url_name('task', page_type='index_or_edit', pk=pk)
return edit(
request,
TaskForm,
Task,
url_name,
'task_edit.html',
active_nav='task',
pk=pk,
kwargs=kwargs)
@staff_member_required
def task_index(request):
settings = Settings.get_solo()
search_fields = ('name', )
context = index_items(
request,
Task,
search_fields,
active_nav='task',
app_settings=settings,
edit_url='task_edit', # Delete modal
order_by=('-active', ),
show_search=True)
return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
context = {}
entry = get_object_or_404(Time, pk=pk)
if not entry.user and not request.user.is_staff:
return HttpResponseRedirect(reverse('admin:index'))
if entry.user:
if (not entry.user.username == request.user.username and
not request.user.is_staff):
return HttpResponseRedirect(reverse('admin:index'))
context['active_nav'] = 'time'
context['edit_url'] = 'entry_edit' # Delete modal
context['item'] = entry
return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
client = request.GET.get('client')
project = request.GET.get('project')
task = None
kwargs, url_name = get_url_name('time', page_type='index_or_edit', pk=pk)
if pk is not None:
entry = get_object_or_404(Time, pk=pk)
if entry.user:
if (entry.user.username != request.user.username and
not request.user.is_staff):
return HttpResponseRedirect(reverse('admin:index'))
else:
if not request.user.is_staff:
return HttpResponseRedirect(reverse('admin:index'))
if client:
client = get_object_or_404(Client, pk=client)
if project:
project = get_object_or_404(Project, pk=project)
if project.task:
task = get_object_or_404(Task, pk=project.task.pk)
projects = Project.objects.filter(team=request.user.pk)
clients = Client.objects.filter(
pk__in=[i.client.pk for i in projects if i.client])
tasks = Task.objects.filter(pk__in=[i.task.pk for i in projects if i.task])
if request.user.is_staff:
from .forms import TimeAdminForm as TimeForm
else:
from .forms import TimeForm
return edit(
request,
TimeForm,
Time,
url_name,
'time_edit.html',
active_nav='time',
client=client,
clients=clients,
pk=pk,
project=project,
projects=projects,
task=task,
tasks=tasks,
kwargs=kwargs)
@login_required
def time_index(request):
search_fields = ('client__name', 'date', 'notes', 'pk', 'project__name',
'invoice__document_id', 'user__username')
settings = Settings.get_solo()
context = index_items(
request,
Time,
search_fields,
active_nav='time',
app_settings=settings,
edit_url='entry_edit', # Delete modal
page_size=3,
order_by=('-date', ),
show_search=True)
if not request.user.is_staff:
return HttpResponseRedirect(reverse('admin:index'))
else:
return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
company = Company.get_solo()
contacts = Contact.objects.all()
settings = Settings.get_solo()
user = get_object_or_404(User, pk=pk)
profile = Profile.objects.get_or_create(user=user)[0]
filters = {
'estimate': None,
'user': user,
}
search_fields = ()
context = index_items(
request,
Time,
search_fields,
order_by=('-date', ),
filters=filters,
app_settings=settings)
total_hours = context['total_hours']
if profile.rate and total_hours:
total_dollars = profile.rate * total_hours
else:
total_dollars = 0
context['active_nav'] = 'user'
context['company'] = company
context['edit_url'] = 'user_edit' # Delete modal
context['icon_size'] = get_setting(request, settings, 'icon_size')
context['item'] = user
context['profile'] = profile
context['request'] = request
context['total_dollars'] = '%.2f' % total_dollars
context['is_contact'] = user.email in [i.email for i in contacts]
# XXX One off to list projects, maybe refactor index_items to return
# multiple listings e.g.
# projects = index_items()
# times = index_items()
# context['projects'] = projects
# context['times'] = times
projects = Project.objects.filter(team__in=[user, ]).order_by('-updated')
context['projects'] = projects
if request.user.pk == int(pk) or request.user.is_staff:
return render(request, 'user.html', context)
else:
return HttpResponseRedirect(reverse('home'))
@staff_member_required
def user_contact(request, pk=None):
return add_user_to_contacts(request, Contact, pk=pk)
@login_required
def user_edit(request, pk=None):
context = {}
kwargs, url_name = get_url_name('user', page_type='index_or_edit', pk=pk)
return edit(
request,
ProfileForm,
Profile,
url_name,
'user_edit.html',
active_nav='user',
context=context,
kwargs=kwargs,
pk=pk)
@staff_member_required
def user_index(request):
company = Company.get_solo()
settings = Settings.get_solo()
# XXX FieldError at /user
# Cannot resolve keyword 'updated' into field.
# search_fields = ('first_name', 'last_name', 'email')
search_fields = ()
context = index_items(
request,
User,
search_fields,
active_nav='user',
app_settings=settings,
order_by=('-profile__active', ),
show_search=False)
context['company'] = company
# Check if user is contact
contacts = Contact.objects.all()
items = context['items']
for item in items:
if item.email in [i.email for i in contacts]:
item.is_contact = True
else:
item.is_contact = False
context['items'] = items
return render(request, 'user_index.html', context)
|
#!/usr/bin/python
# To run this test you need python nose tools installed
# Run test just use:
# nosetest tests.py
#
# *Note: If you add additional test, please prefix the function name
# to the type of operation being performed. For instance modifying an
# image, test_image_erode(). If you are looking for lines, then
# test_detection_lines(). This makes it easier to verify visually
# that all the correct test per operation exist
import os, sys
from SimpleCV import *
from nose.tools import with_setup
#colors
black = Color.BLACK
white = Color.WHITE
red = Color.RED
green = Color.GREEN
blue = Color.BLUE
#images
barcode = "../sampleimages/barcode.png"
testimage = "../sampleimages/9dots4lines.png"
testimage2 = "../sampleimages/aerospace.jpg"
whiteimage = "../sampleimages/white.png"
blackimage = "../sampleimages/black.png"
testimageclr = "../sampleimages/statue_liberty.jpg"
testbarcode = "../sampleimages/barcode.png"
testoutput = "../sampleimages/9d4l.jpg"
tmpimg = "../sampleimages/tmpimg.jpg"
greyscaleimage = "../sampleimages/greyscale.jpg"
logo = "../sampleimages/logo.png"
logo_inverted = "../sampleimages/logo_inverted.png"
ocrimage = "../sampleimages/ocr-test.png"
#These function names are required by nose test, please leave them as is
def setup_context():
img = Image(testimage)
def destroy_context():
img = ""
@with_setup(setup_context, destroy_context)
def test_image_loadsave():
img = Image(testimage)
img.save(testoutput)
if (os.path.isfile(testoutput)):
os.remove(testoutput)
pass
else:
assert False
def test_image_numpy_constructor():
img = Image(testimage)
grayimg = img.grayscale()
chan3_array = np.array(img.getMatrix())
chan1_array = np.array(img.getGrayscaleMatrix())
img2 = Image(chan3_array)
grayimg2 = Image(chan1_array)
if (img2[0,0] == img[0,0] and grayimg2[0,0] == grayimg[0,0]):
pass
else:
assert False
def test_image_bitmap():
img = Image(testimage)
bmp = img.getBitmap();
if bmp.width > 0:
pass
else:
assert False
# Image Class Test
def test_image_stretch():
img = Image(greyscaleimage)
stretched = img.stretch(100,200)
img.save(tmpimg)
def test_image_scale():
img = Image(testimage)
thumb = img.scale(30,30)
thumb.save(testoutput)
def test_image_copy():
img = Image(testimage2)
copy = img.copy()
if (img[1,1] != copy[1,1] or img.size() != copy.size()):
assert False
pass
def test_image_getitem():
img = Image(testimage)
colors = img[1,1]
if (colors[0] == 255 and colors[1] == 255 and colors[2] == 255):
pass
else:
assert False
def test_image_getslice():
img = Image(testimage)
section = img[1:10,1:10]
section.save(testoutput)
pass
def test_image_setitem():
img = Image(testimage)
img[1,1] = (0, 0, 0)
newimg = Image(img.getBitmap())
colors = newimg[1,1]
if (colors[0] == 0 and colors[1] == 0 and colors[2] == 0):
pass
else:
assert False
def test_image_setslice():
img = Image(testimage)
img[1:10,1:10] = (0,0,0) #make a black box
newimg = Image(img.getBitmap())
section = newimg[1:10,1:10]
for i in range(5):
colors = section[i,0]
if (colors[0] != 0 or colors[1] != 0 or colors[2] != 0):
assert False
pass
def test_detection_findCorners():
img = Image(testimage2)
corners = img.findCorners(25)
if (len(corners) == 0):
assert False
corners.draw()
img.save(testoutput)
def test_color_meancolor():
img = Image(testimage2)
roi = img[1:50,1:50]
r, g, b = roi.meanColor()
if (r >= 0 and r <= 255 and g >= 0 and g <= 255 and b >= 0 and b <= 255):
pass
def test_image_smooth():
img = Image(testimage2)
img.smooth()
img.smooth('bilateral', (3,3), 4, 1)
img.smooth('blur', (3, 3))
img.smooth('median', (3, 3))
img.smooth('gaussian', (5,5), 0)
pass
def test_image_binarize():
img = Image(testimage2)
binary = img.binarize()
binary2 = img.binarize((60, 100, 200))
hist = binary.histogram(20)
hist2 = binary2.histogram(20)
if (hist[0] + hist[-1] == np.sum(hist) and hist2[0] + hist2[-1] == np.sum(hist2)):
pass
else:
assert False
def test_image_binarize_adaptive():
img = Image(testimage2)
binary = img.binarize(-1)
hist = binary.histogram(20)
if (hist[0] + hist[-1] == np.sum(hist)):
pass
else:
assert False
def test_image_invert():
img = Image(testimage2)
clr = img[1,1]
img = img.invert()
if (clr[0] == (255 - img[1,1][0])):
pass
else:
assert False
def test_image_size():
img = Image(testimage2)
(width, height) = img.size()
if type(width) == int and type(height) == int and width > 0 and height > 0:
pass
else:
assert False
def test_image_drawing():
img = Image(testimageclr)
img.drawCircle((5, 5), 3)
img.drawLine((5, 5), (5, 8))
def test_image_splitchannels():
img = Image(testimageclr)
(r, g, b) = img.splitChannels(True)
(red, green, blue) = img.splitChannels()
pass
def test_image_histogram():
img = Image(testimage2)
h = img.histogram(25)
for i in h:
if type(i) != int:
assert False
pass
def test_detection_lines():
img = Image(testimage2)
lines = img.findLines()
lines.draw()
img.save(testoutput)
def test_detection_feature_measures():
img = Image(testimage2)
fs = FeatureSet()
fs.append(Corner(img, 5, 5))
fs.append(Line(img, ((2, 2), (3,3))))
print(fs)
#if BLOBS_ENABLED:
bm = BlobMaker()
result = bm.extract(img)
fs.extend(result)
print(fs)
#fs.append(img.findBlobs()[0])
#if ZXING_ENABLED:
# fake_barcode = Barcode(img, zxing.BarCode("""
#file:default.png (format: FAKE_DATA, type: TEXT):
#Raw result:
#foo-bar|the bar of foo
#Parsed result:
#foo-bar
#the bar of foo
#Also, there were 4 result points:
# Point 0: (24.0,18.0)
# Point 1: (21.0,196.0)
# Point 2: (201.0,198.0)
# Point 3: (205.23952,21.0)
#"""))
#fs.append(fake_barcode)
for f in fs:
a = f.area()
l = f.length()
c = f.meanColor()
d = f.colorDistance()
th = f.angle()
pts = f.coordinates()
dist = f.distanceFrom() #distance from center of image
fs2 = fs.sortAngle()
fs3 = fs.sortLength()
fs4 = fs.sortColorDistance()
fs5 = fs.sortArea()
fs1 = fs.sortDistance()
def test_detection_blobs():
if not BLOBS_ENABLED:
return None
img = Image(testbarcode)
bm = BlobMaker()
blobs = bm.extract(img)
blobs[0].draw()
img.save(testoutput)
pass
def test_detection_blobs_adaptive():
if not BLOBS_ENABLED:
return None
img = Image(testimage)
bm = BlobMaker()
result = bm.extract(img, threshval=-1)
result[0].draw()
img.save(testoutput)
pass
def test_detection_barcode():
if not ZXING_ENABLED:
return None
nocode = Image(testimage).findBarcode()
if nocode: #we should find no barcode in our test image
assert False
code = Image(testbarcode).findBarcode()
if code.points:
pass
def test_detection_x():
tmpX = Image(testimage).findLines().x()[0]
if (tmpX > 0 and Image(testimage).size()[0]):
pass
else:
assert False
def test_detection_y():
tmpY = Image(testimage).findLines().y()[0]
if (tmpY > 0 and Image(testimage).size()[0]):
pass
else:
assert False
def test_detection_area():
img = Image(testimage2)
bm = BlobMaker()
result = bm.extract(img)
area_val = result[0].area()
if(area_val > 0):
pass
else:
assert False
def test_detection_angle():
angle_val = Image(testimage).findLines().angle()[0]
def test_image():
img = Image(testimage)
if(isinstance(img, Image)):
pass
else:
assert False
def test_color_colordistance():
img = Image(blackimage)
(r,g,b) = img.splitChannels()
avg = img.meanColor()
c1 = Corner(img, 1, 1)
c2 = Corner(img, 1, 2)
if (c1.colorDistance(c2.meanColor()) != 0):
assert False
if (c1.colorDistance((0,0,0)) != 0):
assert False
if (c1.colorDistance((0,0,255)) != 255):
assert False
if (c1.colorDistance((255,255,255)) != sqrt(255**2 * 3)):
assert False
pass
def test_detection_length():
img = Image(testimage)
val = img.findLines().length()
if (val == None):
assert False
if (not isinstance(val, np.ndarray)):
assert False
if (len(val) < 0):
assert False
pass
def test_detection_sortangle():
img = Image(testimage)
val = img.findLines().sortAngle()
if(val[0].x < val[1].x):
pass
else:
assert False
def test_detection_sortarea():
img = Image(testimage)
bm = BlobMaker()
result = bm.extract(img)
val = result.sortArea()
#FIXME: Find blobs may appear to be broken. Returning type none
def test_detection_sortLength():
img = Image(testimage)
val = img.findLines().sortLength()
#FIXME: Length is being returned as euclidean type, believe we need a universal type, either Int or scvINT or something.
#def test_distanceFrom():
#def test_sortColorDistance():
#def test_sortDistance():
def test_image_add():
imgA = Image(blackimage)
imgB = Image(whiteimage)
imgC = imgA + imgB
def test_color_curve_HSL():
y = np.array([[0,0],[64,128],[192,128],[255,255]]) #These are the weights
curve = ColorCurve(y)
img = Image(testimage)
img2 = img.applyHLSCurve(curve,curve,curve)
img3 = img-img2
c = img3.meanColor()
print(c)
if( c[0] > 2.0 or c[1] > 2.0 or c[2] > 2.0 ): #there may be a bit of roundoff error
assert False
def test_color_curve_RGB():
y = np.array([[0,0],[64,128],[192,128],[255,255]]) #These are the weights
curve = ColorCurve(y)
img = Image(testimage)
img2 = img.applyRGBCurve(curve,curve,curve)
img3 = img-img2
c = img3.meanColor()
if( c[0] > 1.0 or c[1] > 1.0 or c[2] > 1.0 ): #there may be a bit of roundoff error
assert False
def test_color_curve_GRAY():
y = np.array([[0,0],[64,128],[192,128],[255,255]]) #These are the weights
curve = ColorCurve(y)
img = Image(testimage)
gray = img.grayscale()
img2 = img.applyIntensityCurve(curve)
print(gray.meanColor())
print(img2.meanColor())
g=gray.meanColor()
i2=img2.meanColor()
if( g[0]-i2[0] > 1 ): #there may be a bit of roundoff error
assert False
def test_image_dilate():
img=Image(barcode)
img2 = img.dilate(20)
c=img2.meanColor()
print(c)
if( c[0] < 254 or c[1] < 254 or c[2] < 254 ):
assert False;
def test_image_erode():
img=Image(barcode)
img2 = img.erode(100)
c=img2.meanColor()
print(c)
if( c[0] > 0 or c[1] > 0 or c[2] > 0 ):
assert False;
def test_image_morph_open():
img = Image(barcode);
erode= img.erode()
dilate = erode.dilate()
result = img.morphOpen()
test = result-dilate
c=test.meanColor()
print(c)
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False;
def test_image_morph_close():
img = Image(barcode)
dilate = img.dilate()
erode = dilate.erode()
result = img.morphClose()
test = result-erode
c=test.meanColor()
print(c)
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False;
def test_image_morph_grad():
img = Image(barcode)
dilate = img.dilate()
erode = img.erode()
dif = dilate-erode
result = img.morphGradient()
test = result-dif
c=test.meanColor()
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False
def test_image_rotate_fixed():
img = Image(testimage2)
img2=img.rotate(180, scale = 1)
img3=img.flipVertical()
img4=img3.flipHorizontal()
test = img4-img2
c=test.meanColor()
print(c)
if( c[0] > 5 or c[1] > 5 or c[2] > 5 ):
assert False
def test_image_rotate_full():
img = Image(testimage2)
img2=img.rotate(180,"full",scale = 1)
c1=img.meanColor()
c2=img2.meanColor()
if( abs(c1[0]-c2[0]) > 5 or abs(c1[1]-c2[1]) > 5 or abs(c1[2]-c2[2]) > 5 ):
assert False
def test_image_shear_warp():
img = Image(testimage2)
dst = ((img.width/2,0),(img.width-1,img.height/2),(img.width/2,img.height-1))
s = img.shear(dst)
color = s[0,0]
if (color != (0,0,0)):
assert False
dst = ((img.width*0.05,img.height*0.03),(img.width*0.9,img.height*0.1),(img.width*0.8,img.height*0.7),(img.width*0.2,img.height*0.9))
w = img.warp(dst)
color = s[0,0]
if (color != (0,0,0)):
assert False
pass
def test_image_affine():
img = Image(testimage2)
src = ((0,0),(img.width-1,0),(img.width-1,img.height-1))
dst = ((img.width/2,0),(img.width-1,img.height/2),(img.width/2,img.height-1))
aWarp = cv.CreateMat(2,3,cv.CV_32FC1)
cv.GetAffineTransform(src,dst,aWarp)
atrans = img.transformAffine(aWarp)
aWarp2 = np.array(aWarp)
atrans2 = img.transformAffine(aWarp2)
test = atrans-atrans2
c=test.meanColor()
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False
def test_image_perspective():
img = Image(testimage2)
src = ((0,0),(img.width-1,0),(img.width-1,img.height-1),(0,img.height-1))
dst = ((img.width*0.05,img.height*0.03),(img.width*0.9,img.height*0.1),(img.width*0.8,img.height*0.7),(img.width*0.2,img.height*0.9))
pWarp = cv.CreateMat(3,3,cv.CV_32FC1)
cv.GetPerspectiveTransform(src,dst,pWarp)
ptrans = img.transformPerspective(pWarp)
pWarp2 = np.array(pWarp)
ptrans2 = img.transformPerspective(pWarp2)
test = ptrans-ptrans2
c=test.meanColor()
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False
def test_image_horz_scanline():
img = Image(logo)
sl = img.getHorzScanline(10)
if( sl.shape[0]!=img.width or sl.shape[1]!=3 ):
assert False
def test_image_vert_scanline():
img = Image(logo)
sl = img.getVertScanline(10)
if( sl.shape[0]!=img.height or sl.shape[1]!=3 ):
assert False
def test_image_horz_scanline_gray():
img = Image(logo)
sl = img.getHorzScanlineGray(10)
if( sl.shape[0]!=img.width or sl.shape[1]!=1 ):
assert False
def test_image_vert_scanline_gray():
img = Image(logo)
sl = img.getVertScanlineGray(10)
if( sl.shape[0]!=img.height or sl.shape[1]!=1 ):
assert False
def test_image_get_pixel():
img = Image(logo)
px = img.getPixel(0,0)
if(px[0] != 0 or px[1] != 0 or px[2] != 0 ):
assert False
def test_image_get_gray_pixel():
img = Image(logo)
px = img.getGrayPixel(0,0)
if(px != 0):
assert False
def test_camera_calibration():
fakeCamera = FrameSource()
path = "../sampleimages/CalibImage"
ext = ".png"
imgs = []
for i in range(0,10):
fname = path+str(i)+ext
img = Image(fname)
imgs.append(img)
fakeCamera.calibrate(imgs)
#we're just going to check that the function doesn't puke
mat = fakeCamera.getCameraMatrix()
if( type(mat) != cv.cvmat ):
assert False
#we're also going to test load in save in the same pass
matname = "TestCalibration"
if( False == fakeCamera.saveCalibration(matname)):
assert False
if( False == fakeCamera.loadCalibration(matname)):
assert False
def test_camera_undistort():
fakeCamera = FrameSource()
fakeCamera.loadCalibration("Default")
img = Image("../sampleimages/CalibImage0.png")
img2 = fakeCamera.undistort(img)
if( not img2 ): #right now just wait for this to return
assert False
def test_image_crop():
img = Image(logo)
x = 5
y = 6
w = 10
h = 20
crop = img.crop(x,y,w,h)
crop2 = img[x:(x+w),y:(y+h)]
diff = crop-crop2;
c=diff.meanColor()
if( c[0] > 0 or c[1] > 0 or c[2] > 0 ):
assert False
def test_image_region_select():
img = Image(logo)
x1 = 0
y1 = 0
x2 = img.width
y2 = img.height
crop = img.regionSelect(x1,y1,x2,y2)
diff = crop-img;
c=diff.meanColor()
if( c[0] > 0 or c[1] > 0 or c[2] > 0 ):
assert False
def test_image_subtract():
imgA = Image(logo)
imgB = Image(logo_inverted)
imgC = imgA - imgB
def test_image_negative():
imgA = Image(logo)
imgB = -imgA
def test_image_divide():
imgA = Image(logo)
imgB = Image(logo_inverted)
imgC = imgA / imgB
def test_image_and():
imgA = Image(logo)
imgB = Image(logo_inverted)
imgC = imgA and imgB
def test_image_or():
imgA = Image(logo)
imgB = Image(logo_inverted)
imgC = imgA or imgB
def test_image_edgemap():
imgA = Image(logo)
imgB = imgA._getEdgeMap()
def test_color_colormap_build():
cm = ColorModel()
cm.add(Image(testimage))
cm.add((127,127,127))
if(cm.contains((127,127,127))):
cm.remove((127,127,127))
else:
assert False
img = cm.threshold(Image(testimage))
c=img.meanColor()
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False
cm.save("temp.txt")
cm2 = ColorModel()
cm2.load("temp.txt")
img = cm2.threshold(Image(testimage))
c=img.meanColor()
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False
def test_feature_height():
imgA = Image(logo)
lines = imgA.findLines(1)
heights = lines.height()
if(len(heights) <= 0 ):
assert False
else:
pass
def test_feature_width():
imgA = Image(logo)
lines = imgA.findLines(1)
widths = lines.width()
if(len(widths) <= 0):
assert False
else:
pass
def test_feature_crop():
imgA = Image(logo)
lines = imgA.findLines(1)
croppedImages = lines.crop()
if(len(croppedImages) <= 0):
assert False
else:
pass
def test_color_conversion_func_BGR():
#we'll just go through the space to make sure nothing blows up
img = Image(testimage)
bgr = img.toBGR()
rgb = img.toRGB()
hls = img.toHLS()
hsv = img.toHSV()
xyz = img.toXYZ()
foo = bgr.toBGR()
foo = bgr.toRGB()
foo = bgr.toHLS()
foo = bgr.toHSV()
foo = bgr.toXYZ()
def test_color_conversion_func_RGB():
img = Image(testimage)
if( not img.isBGR() ):
assert False
rgb = img.toRGB()
foo = rgb.toBGR()
if( not foo.isBGR() ):
assert False
foo = rgb.toRGB()
if( not foo.isRGB() ):
assert False
foo = rgb.toHLS()
if( not foo.isHLS() ):
assert False
foo = rgb.toHSV()
if( not foo.isHSV() ):
assert False
foo = rgb.toXYZ()
if( not foo.isXYZ() ):
assert False
def test_color_conversion_func_HSV():
img = Image(testimage)
hsv = img.toHSV()
foo = hsv.toBGR()
foo = hsv.toRGB()
foo = hsv.toHLS()
foo = hsv.toHSV()
foo = hsv.toXYZ()
def test_color_conversion_func_HLS():
img = Image(testimage)
hls = img.toHLS()
foo = hls.toBGR()
foo = hls.toRGB()
foo = hls.toHLS()
foo = hls.toHSV()
foo = hls.toXYZ()
def test_color_conversion_func_XYZ():
img = Image(testimage)
xyz = img.toXYZ()
foo = xyz.toBGR()
foo = xyz.toRGB()
foo = xyz.toHLS()
foo = xyz.toHSV()
foo = xyz.toXYZ()
def test_blob_maker():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
results = blobber.extract(img)
print(len(results))
if( len(results) != 7 ):
assert False
def test_blob_holes():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
blobs = blobber.extract(img)
count = 0
for b in blobs:
if( b.mHoleContour is not None ):
count = count + len(b.mHoleContour)
if( count != 7 ):
assert False
def test_blob_hull():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
blobs = blobber.extract(img)
for b in blobs:
if( len(b.mConvexHull) < 3 ):
assert False
def test_blob_data():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
blobs = blobber.extract(img)
for b in blobs:
if(b.mArea > 0):
pass
if(b.mPerimeter > 0):
pass
if(sum(b.mAvgColor) > 0 ):
pass
if(sum(b.mBoundingBox) > 0 ):
pass
if(b.m00 is not 0 and
b.m01 is not 0 and
b.m10 is not 0 and
b.m11 is not 0 and
b.m20 is not 0 and
b.m02 is not 0 and
b.m21 is not 0 and
b.m12 is not 0 ):
pass
if(sum(b.mHu) > 0):
pass
def test_blob_render():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
blobs = blobber.extract(img)
dl = DrawingLayer((img.width,img.height))
reimg = DrawingLayer((img.width,img.height))
for b in blobs:
b.draw(color=Color.RED, alpha=128)
b.drawHoles(width=2,color=Color.BLUE)
b.drawHull(color=Color.ORANGE,width=2)
b.draw(color=Color.RED, alpha=128,layer=dl)
b.drawHoles(width=2,color=Color.BLUE,layer=dl)
b.drawHull(color=Color.ORANGE,width=2,layer=dl)
b.drawMaskToLayer(reimg,offset=b.topLeftCorner())
pass
def test_blob_methods():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
blobs = blobber.extract(img)
BL = (img.width,img.height)
first = blobs[0]
for b in blobs:
b.width()
b.height()
b.area()
b.maxX()
b.minX()
b.maxY()
b.minY()
b.minRectWidth()
b.minRectHeight()
b.minRectX()
b.minRectY()
b.aspectRatio()
b.angle()
if(not b.contains((b.x,b.y))):
assert False
if(b.below((0,0))):
assert False
if(not b.left((0,0))):
assert False
if(b.above(BL)):
assert False
if( not b.right(BL)):
assert False
b.overlaps(first)
b.above(first)
b.below(first)
b.left(first)
b.right(first)
b.contains(first)
b.overlaps(first)
def test_detection_ocr():
img = Image(ocrimage)
print "TESTING OCR"
foundtext = img.readText()
print foundtext
if(len(foundtext) <= 1):
assert False
else:
pass
def test_template_match():
source = Image("../sampleimages/templatetest.png")
template = Image("../sampleimages/template.png")
t = 5
methods = ["SQR_DIFF","SQR_DIFF_NORM","CCOEFF","CCOEFF_NORM","CCORR","CCORR_NORM"]
for m in methods:
fs = source.findTemplate(template,threshold=t,method=m)
pass
#def test_get_holes()
#def test
Removed some of the unneeded saves and cleaned up a few of the tests
#!/usr/bin/python
# To run this test you need python nose tools installed
# Run test just use:
# nosetest tests.py
#
# *Note: If you add additional test, please prefix the function name
# to the type of operation being performed. For instance modifying an
# image, test_image_erode(). If you are looking for lines, then
# test_detection_lines(). This makes it easier to verify visually
# that all the correct test per operation exist
import os, sys
from SimpleCV import *
from nose.tools import with_setup
#colors
black = Color.BLACK
white = Color.WHITE
red = Color.RED
green = Color.GREEN
blue = Color.BLUE
#images
barcode = "../sampleimages/barcode.png"
testimage = "../sampleimages/9dots4lines.png"
testimage2 = "../sampleimages/aerospace.jpg"
whiteimage = "../sampleimages/white.png"
blackimage = "../sampleimages/black.png"
testimageclr = "../sampleimages/statue_liberty.jpg"
testbarcode = "../sampleimages/barcode.png"
testoutput = "../sampleimages/9d4l.jpg"
tmpimg = "../sampleimages/tmpimg.jpg"
greyscaleimage = "../sampleimages/greyscale.jpg"
logo = "../sampleimages/logo.png"
logo_inverted = "../sampleimages/logo_inverted.png"
ocrimage = "../sampleimages/ocr-test.png"
#These function names are required by nose test, please leave them as is
def setup_context():
img = Image(testimage)
def destroy_context():
img = ""
@with_setup(setup_context, destroy_context)
def test_image_loadsave():
img = Image(testimage)
img.save(testoutput)
if (os.path.isfile(testoutput)):
os.remove(testoutput)
pass
else:
assert False
def test_image_numpy_constructor():
img = Image(testimage)
grayimg = img.grayscale()
chan3_array = np.array(img.getMatrix())
chan1_array = np.array(img.getGrayscaleMatrix())
img2 = Image(chan3_array)
grayimg2 = Image(chan1_array)
if (img2[0,0] == img[0,0] and grayimg2[0,0] == grayimg[0,0]):
pass
else:
assert False
def test_image_bitmap():
img = Image(testimage)
bmp = img.getBitmap();
if bmp.width > 0:
pass
else:
assert False
# Image Class Test
def test_image_stretch():
img = Image(greyscaleimage)
stretched = img.stretch(100,200)
if(stretched == None):
assert False
def test_image_scale():
img = Image(testimage)
thumb = img.scale(30,30)
if(thumb == None):
assert False
def test_image_copy():
img = Image(testimage2)
copy = img.copy()
if (img[1,1] != copy[1,1] or img.size() != copy.size()):
assert False
pass
def test_image_getitem():
img = Image(testimage)
colors = img[1,1]
if (colors[0] == 255 and colors[1] == 255 and colors[2] == 255):
pass
else:
assert False
def test_image_getslice():
img = Image(testimage)
section = img[1:10,1:10]
if(section == None):
assert False
def test_image_setitem():
img = Image(testimage)
img[1,1] = (0, 0, 0)
newimg = Image(img.getBitmap())
colors = newimg[1,1]
if (colors[0] == 0 and colors[1] == 0 and colors[2] == 0):
pass
else:
assert False
def test_image_setslice():
img = Image(testimage)
img[1:10,1:10] = (0,0,0) #make a black box
newimg = Image(img.getBitmap())
section = newimg[1:10,1:10]
for i in range(5):
colors = section[i,0]
if (colors[0] != 0 or colors[1] != 0 or colors[2] != 0):
assert False
pass
def test_detection_findCorners():
img = Image(testimage2)
corners = img.findCorners(25)
if (len(corners) == 0):
assert False
def test_color_meancolor():
img = Image(testimage2)
roi = img[1:50,1:50]
r, g, b = roi.meanColor()
if (r >= 0 and r <= 255 and g >= 0 and g <= 255 and b >= 0 and b <= 255):
pass
def test_image_smooth():
img = Image(testimage2)
img.smooth()
img.smooth('bilateral', (3,3), 4, 1)
img.smooth('blur', (3, 3))
img.smooth('median', (3, 3))
img.smooth('gaussian', (5,5), 0)
pass
def test_image_binarize():
img = Image(testimage2)
binary = img.binarize()
binary2 = img.binarize((60, 100, 200))
hist = binary.histogram(20)
hist2 = binary2.histogram(20)
if (hist[0] + hist[-1] == np.sum(hist) and hist2[0] + hist2[-1] == np.sum(hist2)):
pass
else:
assert False
def test_image_binarize_adaptive():
img = Image(testimage2)
binary = img.binarize(-1)
hist = binary.histogram(20)
if (hist[0] + hist[-1] == np.sum(hist)):
pass
else:
assert False
def test_image_invert():
img = Image(testimage2)
clr = img[1,1]
img = img.invert()
if (clr[0] == (255 - img[1,1][0])):
pass
else:
assert False
def test_image_size():
img = Image(testimage2)
(width, height) = img.size()
if type(width) == int and type(height) == int and width > 0 and height > 0:
pass
else:
assert False
def test_image_drawing():
img = Image(testimageclr)
img.drawCircle((5, 5), 3)
img.drawLine((5, 5), (5, 8))
def test_image_splitchannels():
img = Image(testimageclr)
(r, g, b) = img.splitChannels(True)
(red, green, blue) = img.splitChannels()
pass
def test_image_histogram():
img = Image(testimage2)
h = img.histogram(25)
for i in h:
if type(i) != int:
assert False
pass
def test_detection_lines():
img = Image(testimage2)
lines = img.findLines()
if(lines == 0 or lines == None):
assert False
def test_detection_feature_measures():
img = Image(testimage2)
fs = FeatureSet()
fs.append(Corner(img, 5, 5))
fs.append(Line(img, ((2, 2), (3,3))))
print(fs)
#if BLOBS_ENABLED:
bm = BlobMaker()
result = bm.extract(img)
fs.extend(result)
print(fs)
#fs.append(img.findBlobs()[0])
#if ZXING_ENABLED:
# fake_barcode = Barcode(img, zxing.BarCode("""
#file:default.png (format: FAKE_DATA, type: TEXT):
#Raw result:
#foo-bar|the bar of foo
#Parsed result:
#foo-bar
#the bar of foo
#Also, there were 4 result points:
# Point 0: (24.0,18.0)
# Point 1: (21.0,196.0)
# Point 2: (201.0,198.0)
# Point 3: (205.23952,21.0)
#"""))
#fs.append(fake_barcode)
for f in fs:
a = f.area()
l = f.length()
c = f.meanColor()
d = f.colorDistance()
th = f.angle()
pts = f.coordinates()
dist = f.distanceFrom() #distance from center of image
fs2 = fs.sortAngle()
fs3 = fs.sortLength()
fs4 = fs.sortColorDistance()
fs5 = fs.sortArea()
fs1 = fs.sortDistance()
def test_detection_blobs():
if not BLOBS_ENABLED:
return None
img = Image(testbarcode)
blobs = img.findBlobs()
if blobs == None:
assert False
def test_detection_blobs_adaptive():
if not BLOBS_ENABLED:
return None
img = Image(testimage)
blobs = img.findBlobs(-1, threshblocksize=99)
if blobs == None:
assert False
def test_detection_barcode():
if not ZXING_ENABLED:
return None
nocode = Image(testimage).findBarcode()
if nocode: #we should find no barcode in our test image
assert False
code = Image(testbarcode).findBarcode()
if code.points:
pass
def test_detection_x():
tmpX = Image(testimage).findLines().x()[0]
if (tmpX > 0 and Image(testimage).size()[0]):
pass
else:
assert False
def test_detection_y():
tmpY = Image(testimage).findLines().y()[0]
if (tmpY > 0 and Image(testimage).size()[0]):
pass
else:
assert False
def test_detection_area():
img = Image(testimage2)
bm = BlobMaker()
result = bm.extract(img)
area_val = result[0].area()
if(area_val > 0):
pass
else:
assert False
def test_detection_angle():
angle_val = Image(testimage).findLines().angle()[0]
def test_image():
img = Image(testimage)
if(isinstance(img, Image)):
pass
else:
assert False
def test_color_colordistance():
img = Image(blackimage)
(r,g,b) = img.splitChannels()
avg = img.meanColor()
c1 = Corner(img, 1, 1)
c2 = Corner(img, 1, 2)
if (c1.colorDistance(c2.meanColor()) != 0):
assert False
if (c1.colorDistance((0,0,0)) != 0):
assert False
if (c1.colorDistance((0,0,255)) != 255):
assert False
if (c1.colorDistance((255,255,255)) != sqrt(255**2 * 3)):
assert False
pass
def test_detection_length():
img = Image(testimage)
val = img.findLines().length()
if (val == None):
assert False
if (not isinstance(val, np.ndarray)):
assert False
if (len(val) < 0):
assert False
pass
def test_detection_sortangle():
img = Image(testimage)
val = img.findLines().sortAngle()
if(val[0].x < val[1].x):
pass
else:
assert False
def test_detection_sortarea():
img = Image(testimage)
bm = BlobMaker()
result = bm.extract(img)
val = result.sortArea()
#FIXME: Find blobs may appear to be broken. Returning type none
def test_detection_sortLength():
img = Image(testimage)
val = img.findLines().sortLength()
#FIXME: Length is being returned as euclidean type, believe we need a universal type, either Int or scvINT or something.
#def test_distanceFrom():
#def test_sortColorDistance():
#def test_sortDistance():
def test_image_add():
imgA = Image(blackimage)
imgB = Image(whiteimage)
imgC = imgA + imgB
def test_color_curve_HSL():
y = np.array([[0,0],[64,128],[192,128],[255,255]]) #These are the weights
curve = ColorCurve(y)
img = Image(testimage)
img2 = img.applyHLSCurve(curve,curve,curve)
img3 = img-img2
c = img3.meanColor()
print(c)
if( c[0] > 2.0 or c[1] > 2.0 or c[2] > 2.0 ): #there may be a bit of roundoff error
assert False
def test_color_curve_RGB():
y = np.array([[0,0],[64,128],[192,128],[255,255]]) #These are the weights
curve = ColorCurve(y)
img = Image(testimage)
img2 = img.applyRGBCurve(curve,curve,curve)
img3 = img-img2
c = img3.meanColor()
if( c[0] > 1.0 or c[1] > 1.0 or c[2] > 1.0 ): #there may be a bit of roundoff error
assert False
def test_color_curve_GRAY():
y = np.array([[0,0],[64,128],[192,128],[255,255]]) #These are the weights
curve = ColorCurve(y)
img = Image(testimage)
gray = img.grayscale()
img2 = img.applyIntensityCurve(curve)
print(gray.meanColor())
print(img2.meanColor())
g=gray.meanColor()
i2=img2.meanColor()
if( g[0]-i2[0] > 1 ): #there may be a bit of roundoff error
assert False
def test_image_dilate():
img=Image(barcode)
img2 = img.dilate(20)
c=img2.meanColor()
print(c)
if( c[0] < 254 or c[1] < 254 or c[2] < 254 ):
assert False;
def test_image_erode():
img=Image(barcode)
img2 = img.erode(100)
c=img2.meanColor()
print(c)
if( c[0] > 0 or c[1] > 0 or c[2] > 0 ):
assert False;
def test_image_morph_open():
img = Image(barcode);
erode= img.erode()
dilate = erode.dilate()
result = img.morphOpen()
test = result-dilate
c=test.meanColor()
print(c)
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False;
def test_image_morph_close():
img = Image(barcode)
dilate = img.dilate()
erode = dilate.erode()
result = img.morphClose()
test = result-erode
c=test.meanColor()
print(c)
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False;
def test_image_morph_grad():
img = Image(barcode)
dilate = img.dilate()
erode = img.erode()
dif = dilate-erode
result = img.morphGradient()
test = result-dif
c=test.meanColor()
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False
def test_image_rotate_fixed():
img = Image(testimage2)
img2=img.rotate(180, scale = 1)
img3=img.flipVertical()
img4=img3.flipHorizontal()
test = img4-img2
c=test.meanColor()
print(c)
if( c[0] > 5 or c[1] > 5 or c[2] > 5 ):
assert False
def test_image_rotate_full():
img = Image(testimage2)
img2=img.rotate(180,"full",scale = 1)
c1=img.meanColor()
c2=img2.meanColor()
if( abs(c1[0]-c2[0]) > 5 or abs(c1[1]-c2[1]) > 5 or abs(c1[2]-c2[2]) > 5 ):
assert False
def test_image_shear_warp():
img = Image(testimage2)
dst = ((img.width/2,0),(img.width-1,img.height/2),(img.width/2,img.height-1))
s = img.shear(dst)
color = s[0,0]
if (color != (0,0,0)):
assert False
dst = ((img.width*0.05,img.height*0.03),(img.width*0.9,img.height*0.1),(img.width*0.8,img.height*0.7),(img.width*0.2,img.height*0.9))
w = img.warp(dst)
color = s[0,0]
if (color != (0,0,0)):
assert False
pass
def test_image_affine():
img = Image(testimage2)
src = ((0,0),(img.width-1,0),(img.width-1,img.height-1))
dst = ((img.width/2,0),(img.width-1,img.height/2),(img.width/2,img.height-1))
aWarp = cv.CreateMat(2,3,cv.CV_32FC1)
cv.GetAffineTransform(src,dst,aWarp)
atrans = img.transformAffine(aWarp)
aWarp2 = np.array(aWarp)
atrans2 = img.transformAffine(aWarp2)
test = atrans-atrans2
c=test.meanColor()
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False
def test_image_perspective():
img = Image(testimage2)
src = ((0,0),(img.width-1,0),(img.width-1,img.height-1),(0,img.height-1))
dst = ((img.width*0.05,img.height*0.03),(img.width*0.9,img.height*0.1),(img.width*0.8,img.height*0.7),(img.width*0.2,img.height*0.9))
pWarp = cv.CreateMat(3,3,cv.CV_32FC1)
cv.GetPerspectiveTransform(src,dst,pWarp)
ptrans = img.transformPerspective(pWarp)
pWarp2 = np.array(pWarp)
ptrans2 = img.transformPerspective(pWarp2)
test = ptrans-ptrans2
c=test.meanColor()
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False
def test_image_horz_scanline():
img = Image(logo)
sl = img.getHorzScanline(10)
if( sl.shape[0]!=img.width or sl.shape[1]!=3 ):
assert False
def test_image_vert_scanline():
img = Image(logo)
sl = img.getVertScanline(10)
if( sl.shape[0]!=img.height or sl.shape[1]!=3 ):
assert False
def test_image_horz_scanline_gray():
img = Image(logo)
sl = img.getHorzScanlineGray(10)
if( sl.shape[0]!=img.width or sl.shape[1]!=1 ):
assert False
def test_image_vert_scanline_gray():
img = Image(logo)
sl = img.getVertScanlineGray(10)
if( sl.shape[0]!=img.height or sl.shape[1]!=1 ):
assert False
def test_image_get_pixel():
img = Image(logo)
px = img.getPixel(0,0)
if(px[0] != 0 or px[1] != 0 or px[2] != 0 ):
assert False
def test_image_get_gray_pixel():
img = Image(logo)
px = img.getGrayPixel(0,0)
if(px != 0):
assert False
def test_camera_calibration():
fakeCamera = FrameSource()
path = "../sampleimages/CalibImage"
ext = ".png"
imgs = []
for i in range(0,10):
fname = path+str(i)+ext
img = Image(fname)
imgs.append(img)
fakeCamera.calibrate(imgs)
#we're just going to check that the function doesn't puke
mat = fakeCamera.getCameraMatrix()
if( type(mat) != cv.cvmat ):
assert False
#we're also going to test load in save in the same pass
matname = "TestCalibration"
if( False == fakeCamera.saveCalibration(matname)):
assert False
if( False == fakeCamera.loadCalibration(matname)):
assert False
def test_camera_undistort():
fakeCamera = FrameSource()
fakeCamera.loadCalibration("Default")
img = Image("../sampleimages/CalibImage0.png")
img2 = fakeCamera.undistort(img)
if( not img2 ): #right now just wait for this to return
assert False
def test_image_crop():
img = Image(logo)
x = 5
y = 6
w = 10
h = 20
crop = img.crop(x,y,w,h)
crop2 = img[x:(x+w),y:(y+h)]
diff = crop-crop2;
c=diff.meanColor()
if( c[0] > 0 or c[1] > 0 or c[2] > 0 ):
assert False
def test_image_region_select():
img = Image(logo)
x1 = 0
y1 = 0
x2 = img.width
y2 = img.height
crop = img.regionSelect(x1,y1,x2,y2)
diff = crop-img;
c=diff.meanColor()
if( c[0] > 0 or c[1] > 0 or c[2] > 0 ):
assert False
def test_image_subtract():
imgA = Image(logo)
imgB = Image(logo_inverted)
imgC = imgA - imgB
def test_image_negative():
imgA = Image(logo)
imgB = -imgA
def test_image_divide():
imgA = Image(logo)
imgB = Image(logo_inverted)
imgC = imgA / imgB
def test_image_and():
imgA = Image(logo)
imgB = Image(logo_inverted)
imgC = imgA and imgB
def test_image_or():
imgA = Image(logo)
imgB = Image(logo_inverted)
imgC = imgA or imgB
def test_image_edgemap():
imgA = Image(logo)
imgB = imgA._getEdgeMap()
def test_color_colormap_build():
cm = ColorModel()
cm.add(Image(testimage))
cm.add((127,127,127))
if(cm.contains((127,127,127))):
cm.remove((127,127,127))
else:
assert False
img = cm.threshold(Image(testimage))
c=img.meanColor()
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False
cm.save("temp.txt")
cm2 = ColorModel()
cm2.load("temp.txt")
img = cm2.threshold(Image(testimage))
c=img.meanColor()
if( c[0] > 1 or c[1] > 1 or c[2] > 1 ):
assert False
def test_feature_height():
imgA = Image(logo)
lines = imgA.findLines(1)
heights = lines.height()
if(len(heights) <= 0 ):
assert False
else:
pass
def test_feature_width():
imgA = Image(logo)
lines = imgA.findLines(1)
widths = lines.width()
if(len(widths) <= 0):
assert False
else:
pass
def test_feature_crop():
imgA = Image(logo)
lines = imgA.findLines(1)
croppedImages = lines.crop()
if(len(croppedImages) <= 0):
assert False
else:
pass
def test_color_conversion_func_BGR():
#we'll just go through the space to make sure nothing blows up
img = Image(testimage)
bgr = img.toBGR()
rgb = img.toRGB()
hls = img.toHLS()
hsv = img.toHSV()
xyz = img.toXYZ()
foo = bgr.toBGR()
foo = bgr.toRGB()
foo = bgr.toHLS()
foo = bgr.toHSV()
foo = bgr.toXYZ()
def test_color_conversion_func_RGB():
img = Image(testimage)
if( not img.isBGR() ):
assert False
rgb = img.toRGB()
foo = rgb.toBGR()
if( not foo.isBGR() ):
assert False
foo = rgb.toRGB()
if( not foo.isRGB() ):
assert False
foo = rgb.toHLS()
if( not foo.isHLS() ):
assert False
foo = rgb.toHSV()
if( not foo.isHSV() ):
assert False
foo = rgb.toXYZ()
if( not foo.isXYZ() ):
assert False
def test_color_conversion_func_HSV():
img = Image(testimage)
hsv = img.toHSV()
foo = hsv.toBGR()
foo = hsv.toRGB()
foo = hsv.toHLS()
foo = hsv.toHSV()
foo = hsv.toXYZ()
def test_color_conversion_func_HLS():
img = Image(testimage)
hls = img.toHLS()
foo = hls.toBGR()
foo = hls.toRGB()
foo = hls.toHLS()
foo = hls.toHSV()
foo = hls.toXYZ()
def test_color_conversion_func_XYZ():
img = Image(testimage)
xyz = img.toXYZ()
foo = xyz.toBGR()
foo = xyz.toRGB()
foo = xyz.toHLS()
foo = xyz.toHSV()
foo = xyz.toXYZ()
def test_blob_maker():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
results = blobber.extract(img)
print(len(results))
if( len(results) != 7 ):
assert False
def test_blob_holes():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
blobs = blobber.extract(img)
count = 0
for b in blobs:
if( b.mHoleContour is not None ):
count = count + len(b.mHoleContour)
if( count != 7 ):
assert False
def test_blob_hull():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
blobs = blobber.extract(img)
for b in blobs:
if( len(b.mConvexHull) < 3 ):
assert False
def test_blob_data():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
blobs = blobber.extract(img)
for b in blobs:
if(b.mArea > 0):
pass
if(b.mPerimeter > 0):
pass
if(sum(b.mAvgColor) > 0 ):
pass
if(sum(b.mBoundingBox) > 0 ):
pass
if(b.m00 is not 0 and
b.m01 is not 0 and
b.m10 is not 0 and
b.m11 is not 0 and
b.m20 is not 0 and
b.m02 is not 0 and
b.m21 is not 0 and
b.m12 is not 0 ):
pass
if(sum(b.mHu) > 0):
pass
def test_blob_render():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
blobs = blobber.extract(img)
dl = DrawingLayer((img.width,img.height))
reimg = DrawingLayer((img.width,img.height))
for b in blobs:
b.draw(color=Color.RED, alpha=128)
b.drawHoles(width=2,color=Color.BLUE)
b.drawHull(color=Color.ORANGE,width=2)
b.draw(color=Color.RED, alpha=128,layer=dl)
b.drawHoles(width=2,color=Color.BLUE,layer=dl)
b.drawHull(color=Color.ORANGE,width=2,layer=dl)
b.drawMaskToLayer(reimg,offset=b.topLeftCorner())
pass
def test_blob_methods():
img = Image("../sampleimages/blockhead.png")
blobber = BlobMaker()
blobs = blobber.extract(img)
BL = (img.width,img.height)
first = blobs[0]
for b in blobs:
b.width()
b.height()
b.area()
b.maxX()
b.minX()
b.maxY()
b.minY()
b.minRectWidth()
b.minRectHeight()
b.minRectX()
b.minRectY()
b.aspectRatio()
b.angle()
if(not b.contains((b.x,b.y))):
assert False
if(b.below((0,0))):
assert False
if(not b.left((0,0))):
assert False
if(b.above(BL)):
assert False
if( not b.right(BL)):
assert False
b.overlaps(first)
b.above(first)
b.below(first)
b.left(first)
b.right(first)
b.contains(first)
b.overlaps(first)
def test_detection_ocr():
img = Image(ocrimage)
print "TESTING OCR"
foundtext = img.readText()
print foundtext
if(len(foundtext) <= 1):
assert False
else:
pass
def test_template_match():
source = Image("../sampleimages/templatetest.png")
template = Image("../sampleimages/template.png")
t = 5
methods = ["SQR_DIFF","SQR_DIFF_NORM","CCOEFF","CCOEFF_NORM","CCORR","CCORR_NORM"]
for m in methods:
fs = source.findTemplate(template,threshold=t,method=m)
pass
#def test_get_holes()
#def test
|
#/datastore/zhenyang/bin/python
import gensim, logging
import sys
import os
from xml.etree import ElementTree
def get_parentmap(tree):
parent_map = {}
for p in tree.iter():
for c in p:
if c in parent_map:
parent_map[c].append(p)
# Or raise, if you don't want to allow this.
else:
parent_map[c] = [p]
# Or parent_map[c] = p if you don't want to allow this
return parent_map
def main():
##############
imgnet_xml_file = 'structure_released.xml'
tree = ElementTree.parse(imgnet_xml_file)
root = tree.getroot()
release_data = root[0].text
synsets = root[1]
##
#for child in synsets.iter():
# if len(child) > 0:
# continue
#wnid = child.attrib.get("wnid")
#imagepath = get_imagepath(wnid)
#if not os.path.exists(imagepath) or os.path.getsize(imagepath) == 0:
# params = {
# "wnid": wnid,
# "username": config.username,
# "accesskey": config.accesskey,
# "release": "latest",
# }
# download_file(config.synset_url, imagepath, params)
##############
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#pretrained_model = './vectors.bin'
pretrained_model = '../freebase-vectors-skipgram1000-en.bin'
#pretrained_model = '../GoogleNews-vectors-negative300.bin'
model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=True)
#model['animal']
#print model.similarity('/en/dog', '/en/cat')
#print model.similarity('/en/dog', '/en/mountain')
##############
parent_map = get_parentmap(tree)
cc = 0
for classid in open('synsets.txt', 'r').readlines():
classid = classid.strip()
#classid = 'n01807828'
#for target in synsets.findall(".//synset[@wnid='" + classid + "']"):
#print target.get('words')
#for parent in parent_map[target]:
#print parent.get('words')
target = synsets.find(".//synset[@wnid='" + classid + "']")
if target:
classnames = target.get('words').split(', ')
for classname in classnames:
classname = '/en/' + classname.replace(' ', '_')
try:
wordvec = model[classname]
except:
print classname
cc = cc + 1
#if classname not in model.keys():
# print classname
else:
print classid
cc = cc + 1
print cc
if __name__ == "__main__":
main()
mod extractVecMat
#/datastore/zhenyang/bin/python
import gensim, logging
import sys
import os
from xml.etree import ElementTree
def get_parentmap(tree):
parent_map = {}
for p in tree.iter():
for c in p:
if c in parent_map:
parent_map[c].append(p)
# Or raise, if you don't want to allow this.
else:
parent_map[c] = [p]
# Or parent_map[c] = p if you don't want to allow this
return parent_map
def main():
##############
imgnet_xml_file = 'structure_released.xml'
tree = ElementTree.parse(imgnet_xml_file)
root = tree.getroot()
release_data = root[0].text
synsets = root[1]
##
#for child in synsets.iter():
# if len(child) > 0:
# continue
#wnid = child.attrib.get("wnid")
#imagepath = get_imagepath(wnid)
#if not os.path.exists(imagepath) or os.path.getsize(imagepath) == 0:
# params = {
# "wnid": wnid,
# "username": config.username,
# "accesskey": config.accesskey,
# "release": "latest",
# }
# download_file(config.synset_url, imagepath, params)
##############
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#pretrained_model = './vectors.bin'
pretrained_model = '../freebase-vectors-skipgram1000-en.bin'
#pretrained_model = '../GoogleNews-vectors-negative300.bin'
model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=True)
#model['animal']
#print model.similarity('/en/dog', '/en/cat')
#print model.similarity('/en/dog', '/en/mountain')
##############
parent_map = get_parentmap(tree)
cc = 0
for classid in open('synsets.txt', 'r').readlines():
classid = classid.strip()
#classid = 'n01807828'
#for target in synsets.findall(".//synset[@wnid='" + classid + "']"):
#print target.get('words')
#for parent in parent_map[target]:
#print parent.get('words')
target = root.find(".//synset[@wnid='" + classid + "']")
if target:
classnames = target.get('words').split(', ')
for classname in classnames:
classname = '/en/' + classname.replace(' ', '_')
try:
wordvec = model[classname]
except:
print classname
cc = cc + 1
#if classname not in model.keys():
# print classname
else:
print classid
cc = cc + 1
print cc
if __name__ == "__main__":
main() |
# -*- coding: utf-8 -*-
'''
Master listens on port default 8000, but it is indtended to run on port 80.
It is web app that manages slaves.
It allows users to register, and manage their instances/containers.
Admin is allowed to modify many aspects of web app.
Sections:
Dashboard
- live stats
- earning [super-only]
- spending [user-only]
Hosts [super]
- list
- add
- remove
- update
Images [per-user, or all for super]
- list
- add
- remove
- update
Volumes
- list
- add
- remove
- update [only name]
Containers [per-user, or all for super]
- list
- create
# - clone
- start
- restart
- stop
- attach
- logs
- destory
Networking
- list
- add
- remove
- update
Settings [super]
- set title
- set logo
Profile
- update
'''
__all__ = ['app']
import os
import sys
from ConfigParser import SafeConfigParser
# requests
import requests
# werkzeug
from werkzeug.contrib.fixers import ProxyFix
# flask
from flask import (
Flask, request, session, g,
redirect, url_for, abort,
render_template, flash, jsonify,
Blueprint, abort,
send_from_directory,
current_app,
)
# flask login
from flask.ext.login import (
current_user, login_required, fresh_login_required,
login_user, logout_user, confirm_login,
)
# config
from config.flask import FlaskConfig
_flask_config_path = os.path.join('config', 'flask.conf')
if os.path.exists(_flask_config_path):
cp = SafeConfigParser()
cp.optionxform = str
cp.read(_flask_config_path)
items = cp.items('FlaskConfig')
items = dict(items)
FlaskConfig.__dict__.update(items)
# app
app = Flask(__name__)
if FlaskConfig.PROXY_FIX:
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object(FlaskConfig)
# flask-sqlalchemy
from model.db import init_db, object_to_dict, objects_to_list
db = init_db(app)
# model
from model.user import UserAccount, UserQuota, UserStat
from model.host import Host
from model.image import Image
from model.volume import Volume
from model.mount import MountPoint
from model.container import Container
from model.network import Domain, Route
# model - create all tables
db.drop_all()
db.create_all()
# create super user if does not exits
if not UserAccount.query.filter_by(username='admin').count():
user_account = UserAccount(
username = 'admin',
password = 'd0cky4rd',
email = 'dockyard@example.com',
usertype = 'super',
)
db.session.add(user_account)
db.session.commit()
# create quota for super user
if not UserQuota.query.filter_by(username='admin').count():
user_quota = UserQuota(
username = 'admin',
)
db.session.add(user_quota)
db.session.commit()
# create stat for super user
if not UserStat.query.filter_by(username='admin').count():
user_stat = UserStat(
username = 'admin',
)
db.session.add(user_stat)
db.session.commit()
# account
from account import account_blueprint, login_manager
app.register_blueprint(account_blueprint)
login_manager.init_app(app)
# dashboard
from dashboard import dashboard_blueprint
app.register_blueprint(dashboard_blueprint)
# host
from host import host_blueprint
app.register_blueprint(host_blueprint)
# image
from image import image_blueprint
app.register_blueprint(image_blueprint)
# mount
from mount import mount_blueprint
app.register_blueprint(mount_blueprint)
# volume
from volume import volume_blueprint
app.register_blueprint(volume_blueprint)
# container
from container import container_blueprint
app.register_blueprint(container_blueprint)
# network
from network import network_blueprint
app.register_blueprint(network_blueprint)
# term
from term import term_blueprint
app.register_blueprint(term_blueprint)
@app.route('/')
def index():
return redirect(url_for(FlaskConfig.DEFAULT_VIEW))
@app.route('/favicon.ico')
def favicon():
return send_from_directory(
os.path.join(app.root_path, 'static', 'dockyard', 'img'),
'favicon.ico',
mimetype='image/vnd.microsoft.icon',
)
@app.route('/robots.txt')
def robots():
return send_from_directory(
os.path.join(app.root_path, 'static', 'dockyard', 'other'),
'robots.txt',
mimetype='text/plain',
)
@app.errorhandler(403)
def forbidden(e):
return render_template('403.html'), 403
@app.errorhandler(404)
def not_found(e):
return render_template('404.html'), 404
@app.errorhandler(410)
def gone(e):
return render_template('410.html'), 410
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
if __name__ == '__main__':
import argparse
# parse cli arguments
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--bind', type=str,
default='{HOST}:{PORT}'.format(**FlaskConfig.__dict__),
help='bind to host:port')
parser.add_argument('-t', '--threaded', type=bool,
default=FlaskConfig.THREADED,
help='threaded execution')
args = parser.parse_args()
# host, port
host_port = args.bind.split(':')
if len(host_port) == 1:
host = host_port[0]
port = FlaskConfig.PORT
else:
host = host_port[0]
port = int(host_port[1])
FlaskConfig.HOST = host
FlaskConfig.PORT = port
FlaskConfig.THREADED = args.threaded
# run app
app.run(
host = FlaskConfig.HOST,
port = FlaskConfig.PORT,
threaded = FlaskConfig.THREADED,
)
mount points, volume, slave
# -*- coding: utf-8 -*-
'''
Master listens on port default 8000, but it is indtended to run on port 80.
It is web app that manages slaves.
It allows users to register, and manage their instances/containers.
Admin is allowed to modify many aspects of web app.
Sections:
Dashboard
- live stats
- earning [super-only]
- spending [user-only]
Hosts [super]
- list
- add
- remove
- update
Images [per-user, or all for super]
- list
- add
- remove
- update
Volumes
- list
- add
- remove
- update [only name]
Containers [per-user, or all for super]
- list
- create
# - clone
- start
- restart
- stop
- attach
- logs
- destory
Networking
- list
- add
- remove
- update
Settings [super]
- set title
- set logo
Profile
- update
'''
__all__ = ['app']
import os
import sys
from ConfigParser import SafeConfigParser
# requests
import requests
# werkzeug
from werkzeug.contrib.fixers import ProxyFix
# flask
from flask import (
Flask, request, session, g,
redirect, url_for, abort,
render_template, flash, jsonify,
Blueprint, abort,
send_from_directory,
current_app,
)
# flask login
from flask.ext.login import (
current_user, login_required, fresh_login_required,
login_user, logout_user, confirm_login,
)
# config
from config.flask import FlaskConfig
_flask_config_path = os.path.join('config', 'flask.conf')
if os.path.exists(_flask_config_path):
cp = SafeConfigParser()
cp.optionxform = str
cp.read(_flask_config_path)
items = cp.items('FlaskConfig')
items = dict(items)
FlaskConfig.__dict__.update(items)
# app
app = Flask(__name__)
if FlaskConfig.PROXY_FIX:
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object(FlaskConfig)
# flask-sqlalchemy
from model.db import init_db, object_to_dict, objects_to_list
db = init_db(app)
# model
from model.user import UserAccount, UserQuota, UserStat
from model.host import Host
from model.image import Image
from model.volume import Volume
from model.mount import MountPoint
from model.container import Container
from model.network import Domain, Route
# model - create all tables
# db.drop_all()
db.create_all()
# create super user if does not exits
if not UserAccount.query.filter_by(username='admin').count():
user_account = UserAccount(
username = 'admin',
password = 'd0cky4rd',
email = 'dockyard@example.com',
usertype = 'super',
)
db.session.add(user_account)
db.session.commit()
# create quota for super user
if not UserQuota.query.filter_by(username='admin').count():
user_quota = UserQuota(
username = 'admin',
)
db.session.add(user_quota)
db.session.commit()
# create stat for super user
if not UserStat.query.filter_by(username='admin').count():
user_stat = UserStat(
username = 'admin',
)
db.session.add(user_stat)
db.session.commit()
# account
from account import account_blueprint, login_manager
app.register_blueprint(account_blueprint)
login_manager.init_app(app)
# dashboard
from dashboard import dashboard_blueprint
app.register_blueprint(dashboard_blueprint)
# host
from host import host_blueprint
app.register_blueprint(host_blueprint)
# image
from image import image_blueprint
app.register_blueprint(image_blueprint)
# mount
from mount import mount_blueprint
app.register_blueprint(mount_blueprint)
# volume
from volume import volume_blueprint
app.register_blueprint(volume_blueprint)
# container
from container import container_blueprint
app.register_blueprint(container_blueprint)
# network
from network import network_blueprint
app.register_blueprint(network_blueprint)
# term
from term import term_blueprint
app.register_blueprint(term_blueprint)
@app.route('/')
def index():
return redirect(url_for(FlaskConfig.DEFAULT_VIEW))
@app.route('/favicon.ico')
def favicon():
return send_from_directory(
os.path.join(app.root_path, 'static', 'dockyard', 'img'),
'favicon.ico',
mimetype='image/vnd.microsoft.icon',
)
@app.route('/robots.txt')
def robots():
return send_from_directory(
os.path.join(app.root_path, 'static', 'dockyard', 'other'),
'robots.txt',
mimetype='text/plain',
)
@app.errorhandler(403)
def forbidden(e):
return render_template('403.html'), 403
@app.errorhandler(404)
def not_found(e):
return render_template('404.html'), 404
@app.errorhandler(410)
def gone(e):
return render_template('410.html'), 410
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
if __name__ == '__main__':
import argparse
# parse cli arguments
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--bind', type=str,
default='{HOST}:{PORT}'.format(**FlaskConfig.__dict__),
help='bind to host:port')
parser.add_argument('-t', '--threaded', type=bool,
default=FlaskConfig.THREADED,
help='threaded execution')
args = parser.parse_args()
# host, port
host_port = args.bind.split(':')
if len(host_port) == 1:
host = host_port[0]
port = FlaskConfig.PORT
else:
host = host_port[0]
port = int(host_port[1])
FlaskConfig.HOST = host
FlaskConfig.PORT = port
FlaskConfig.THREADED = args.threaded
# run app
app.run(
host = FlaskConfig.HOST,
port = FlaskConfig.PORT,
threaded = FlaskConfig.THREADED,
)
|
#!/usr/bin/env python2
# fMBT, free Model Based Testing tool
# Copyright (c) 2016-2018, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
# This library implements pico-sized multiplatform shell
"""Pico-sized multiplatform shell
"""
# pylint: disable = redefined-builtin, eval-used, exec-used, invalid-name
# pylint: disable = missing-docstring, global-statement, unneeded-not
# pylint: disable = bare-except, broad-except, too-many-branches
# pylint: disable = singleton-comparison
import atexit
import base64
import datetime
import difflib
import fnmatch
import getopt
import getpass
import glob
import inspect
import md5
import os
import re
import shlex
import shutil
import signal
import socket
import subprocess
import sys
import tarfile
import time
import types
import urllib2
import zipfile
try:
import pythonshare
except ImportError:
pythonshare = None
try:
import recb
except ImportError:
recb = None
if os.name == "nt":
import ctypes
_g_pipe_filename = "pycosh.pipe.%s" % (os.getpid(),)
_g_pipe_has_data = False
_g_pyenv = {}
def _file(filename, mode="rb"):
try:
return file(filename, mode)
except IOError, e:
raise ValueError(str(e).replace(":", ""))
def _write_b64(filename, b64data):
file(filename, "wb").write(base64.b64decode(b64data))
def _getopts(args, shortopts, longopts=()):
try:
opts, remainder = getopt.gnu_getopt(args, shortopts, longopts)
except getopt.GetoptError, e:
raise Exception("Options: -%s (%s)" % (shortopts, e))
return dict(opts), remainder
def _human_readable_size(size):
scale = "BkMGTPEZY"
divisions = 0
while size >= 1000:
size = size / 1024.0
divisions += 1
return "%.1f%s" % (size, scale[divisions])
def _output(s):
sys.stdout.write(s)
sys.stdout.flush()
def _shell_soe(cmd):
try:
p = subprocess.Popen(cmd, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
status = p.returncode
except OSError:
status, out, err = None, None, None
if out != None and sys.stdout.encoding:
out = out.decode(sys.stdout.encoding).encode("utf-8")
if err != None and sys.stderr.encoding:
err = err.decode(sys.stderr.encoding).encode("utf-8")
return status, out, err
def cmd2py(cmdline):
if "|" in cmdline:
cmd_left, cmd_right = cmdline.split("|", 1)
funccall = "pipe(%s, %s)" % (repr(cmd2py(cmd_left)),
repr(cmd2py(cmd_right)))
elif ">" in cmdline:
cmd_left, cmd_right = cmdline.split(">", 1)
filenames = shlex.split(cmd_right)
if len(filenames) != 1:
raise ValueError('right side of > must have single filename')
funccall = "pipe(%s, %s)" % (
repr(cmd2py(cmd_left)),
repr("redir(%r)" % (filenames[0],)))
else:
cmdline_list = shlex.split(cmdline.strip())
funcname = cmdline_list[0]
args = cmdline_list[1:]
funccall = funcname + repr(tuple(args))
return funccall
if not recb is None:
_parsed_call = []
_parsed_calls = []
_parsed_oper = []
def _prog_args_parsed(*args):
_parsed_calls.append("%s%s" % (
_parsed_call[0], repr(tuple(_parsed_call[1:]))))
_parsed_call[:] = []
def _call_seq_parsed(*args):
if _parsed_oper:
for oper in _parsed_oper:
if oper == ";":
_parsed_calls[0:2] = ["cat_eval(%s, %s)" % (repr(_parsed_calls[0]), repr(_parsed_calls[1]))]
elif oper == "|":
_parsed_calls[0:2] = ["pipe(%s, %s)" % (repr(_parsed_calls[0]), repr(_parsed_calls[1]))]
_parsed_oper[:] = []
_PROG = recb.pattern(r"(?P<func>[a-zA-Z_0-9]+)\s*",
cb=lambda _1, m, _2: _parsed_call.append(m.groupdict()['func']))
_ARG_QUOTED = recb.pattern(r"'(?P<arg>[^']*)'\s*")
_ARG_DOUBLE_QUOTED = recb.pattern(r'"(?P<arg>[^"]*)"\s*')
_ARG_SPACE_SEP = recb.pattern(r"(?P<arg>[^\s;|()]+)\s*")
_CMD_CALL = (
_PROG + recb.many(
recb.any(_ARG_QUOTED, _ARG_DOUBLE_QUOTED, _ARG_SPACE_SEP).set_cb(
lambda _1, m, _2: _parsed_call.append(m[1].groupdict()['arg'])))
).set_ca(_prog_args_parsed)
_CMD_CONT = (recb.pattern(r"(?P<oper>[|;])\s*").set_cb(
lambda _1, m, _2: _parsed_oper.append(m.groupdict()['oper']))
+ _CMD_CALL)
_CMD_PIPE_SEQ = (_CMD_CALL + recb.many(_CMD_CONT)).set_ca(_call_seq_parsed)
_CMD_GROUP = (recb.pattern(r"\(\s*") +
_CMD_PIPE_SEQ +
recb.pattern(r"\)\s*") + recb.many(_CMD_CONT))
_CMD = (_CMD_GROUP | _CMD_PIPE_SEQ).set_patterns({'GROUP': _CMD_GROUP})
def _test_cmd2py_newparser(cmdline):
if recb is None:
raise ImportError('recb required')
_CMD.debug(interactive=False).parse("")
_CMD.debug(interactive=False).parse("ls 'i'")
_CMD.debug(interactive=False).parse('ls I "I"')
_CMD.debug(interactive=False).parse('echo I "i I" | grep i')
_CMD.debug(interactive=False).parse('echo I "i I" | grep I | grep i')
_CMD.debug(interactive=False).parse("(echo test.txt; ls) | grep '1 2 3'")
def cmd2py_newparser(cmdline):
_parsed_call[:] = []
_parsed_calls[:] = []
_parsed_oper[:] = []
_, unparsed = _CMD.parse(cmdline)
if unparsed:
view_chars = 10 # how many chars around syntax error is shown in error message
error_pos = len(cmdline) - len(unparsed) + 1
str_at_pos = (repr(cmdline[max(error_pos-view_chars, 0):error_pos]) +
"<--[error pos]" +
repr(cmdline[error_pos:min(len(cmdline)-1, error_pos+view_chars)]))
raise ValueError('syntax error at pos %s: %s' % (
len(cmdline) - len(unparsed),
str_at_pos))
if len(_parsed_calls) == 1:
return _parsed_calls[0]
else:
raise ValueError('parse error')
def prompt():
"""prompt
print prompt"""
try:
user = getpass.getuser()
except Exception:
user = ""
try:
hostname = socket.gethostname()
except Exception:
hostname = ""
return (user + "@" +
hostname + ":" +
os.getcwd().replace("\\", "/") + ": ")
def awk(prog, *args):
"""awk PROG [FILE...]
PROG syntax: [/REGEXP/]{print $N...}"""
filenames = expand(*args, accept_pipe=True).splitlines()
if not filenames:
raise ValueError("missing input")
rv = []
awk_syntax = re.compile('(/([^/]*)/)?\{([^}]*)\}')
parsed_prog = awk_syntax.match(prog)
if not parsed_prog:
raise ValueError('syntax error in awk program')
awk_pattern = parsed_prog.group(2)
if not awk_pattern is None:
awk_pattern_re = re.compile(awk_pattern)
else:
awk_pattern_re = re.compile("")
awk_statements = [s.strip() for s in parsed_prog.group(3).split(";")]
awk_fieldno_re = re.compile("\$([0-9]+)")
awk_fieldsep_re = re.compile("[ \n\t\r]*")
for filename in filenames:
for line in open(filename).xreadlines():
if awk_pattern_re.search(line):
for stmt in awk_statements:
if stmt.startswith("print"):
what = stmt[5:].strip()
if not what:
# plain "print" results in full line
what = "$0"
else:
# no variable handling for now...
what = what.replace('"', '')
fields = [int(n) for n in awk_fieldno_re.findall(what)]
translate = {}
if fields:
line_fields = [line.splitlines()[0]] + [
l for l in awk_fieldsep_re.split(line) if l]
for field in fields:
if field < len(line_fields):
translate["$" + str(field)] = line_fields[field]
else:
translate["$" + str(field)] = ""
for rep in reversed(sorted(translate.keys())):
# if not reversed, might replace $1 before $10
what = what.replace(rep, translate[rep])
rv.append(what)
return "\n".join(rv)
def cd(dirname):
"""cd DIRNAME
change current working directory"""
d = expand(dirname, accept_pipe=False, min=1, exist=True).splitlines()
if len(d) > 1:
raise ValueError("ambiguous directory name")
os.chdir(os.path.join(os.getcwd(), d[0]))
return ""
def curl(*args):
"""curl [-x P][-o FILE] URL
download URL (use proxy P), save to FILE"""
opts, urls = _getopts(args, "x:o:")
rv = []
if not urls:
raise ValueError("missing URL(s)")
if "-x" not in opts and "http_proxy" in _g_pyenv:
opts["-x"] = _g_pyenv.get("http_proxy")
if "-x" in opts:
proxy = urllib2.ProxyHandler({
'http': opts["-x"],
'https': opts["-x"],
'ftp': opts["-x"]})
else:
proxy = urllib2.ProxyHandler({})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
for url in urls:
data = urllib2.urlopen(url).read()
if "-o" in opts:
_file(opts["-o"], "a").write(data)
else:
rv.append(data)
return "".join(rv)
def find(*args):
"""find [-n NAME][-i][-t T][-p a] DIR...
find under DIR(s), see help(find)"""
# -n NAME: search entries matching wildcard pattern NAME
# -i: ignore case
# -t f: (type=file) match only files
# -t d: (type=dir) match only directories
# -p a: print absolute paths
opts, remainder = _getopts(args, "n:t:ip:")
dirnames = expand(*remainder, exist=True, accept_pipe=False).splitlines()
if not dirnames:
raise ValueError("missing DIR")
if "-n" in opts:
findname = opts["-n"]
else:
findname = "*"
if "-p" in opts:
if opts["-p"] == "a":
print_absolute_names = True
else:
raise ValueError("invalid print option -p %r, supported: 'a'" %
(opts["-p"],))
else:
print_absolute_names = False
if "-i" in opts:
ignore_case = True
else:
ignore_case = False
if "-t" in opts:
findtype = opts["-t"].lower()
if findtype not in ["f", "d"]:
raise ValueError("find type must be 'f' (file) or 'd' (directory)")
else:
findtype = None
rv = []
for dirname in dirnames:
dirname_ends_with_sep = dirname[-1] in ["/", "\\"]
slash_only = not "\\" in dirname
if slash_only:
sep = "/"
else:
sep = os.path.sep
# DIR + NAME forms a path without duplicate path separators:
# if (and only if) DIR ends with /, then NAME does not start with /
for root, dirs, files in os.walk(dirname):
if slash_only:
root = root.replace("\\", "/")
if findtype:
dirs_set = set(dirs)
files_set = set(files)
for name in dirs + files:
if ((ignore_case == False and fnmatch.fnmatch(name, findname)) or
(ignore_case == True and fnmatch.fnmatch(name.lower(), findname.lower()))):
if (findtype == "f" and name not in files_set):
continue # skip not-a-file from find -t f ...
elif (findtype == "d" and name not in dirs_set):
continue # skip not-a-dir from find -t d ...
if print_absolute_names:
rv.append(os.path.abspath(root + sep + name).replace('\\','/'))
else:
if root == dirname:
if dirname_ends_with_sep:
rv.append(name)
else:
rv.append(sep + name)
else:
rv.append(root[len(dirname):] + sep + name)
return "\n".join(rv)
def date():
"""date
print current date and time"""
t = datetime.datetime.now()
return t.strftime("%Y-%m-%d %H:%M:%S.") + str(t.microsecond)
def diff(*args):
"""diff FILE1 FILE2
print differences between two files"""
files = expand(*args).splitlines()
try:
file1, file2 = files
except Exception:
raise ValueError("exactly two files required")
lines1 = _file(file1).readlines()
lines2 = _file(file2).readlines()
udiff = difflib.unified_diff(lines1, lines2, file1, file2)
# append endlines to lines where it is missing
difflines = [l + ["", "\n"][l[-1] != "\n"] for l in udiff]
return "".join(difflines)
def du(*args):
"""du [-h] FILE...
print [human readable] disk usage of FILEs"""
opts, filenames = _getopts(args, "h")
if "-h" in opts:
size_formatter = _human_readable_size
else:
size_formatter = lambda size: str(size)
filenames = expand(*filenames, accept_pipe=False, min=1).splitlines()
total_size = 0
retval = []
for direntry in filenames:
size = None
if os.path.isdir(direntry):
for root, dirs, filelist in os.walk(direntry):
for filename in filelist:
fullname = os.path.join(root, filename)
size = os.stat(fullname).st_size
retval.append("%-8s %s" % (size_formatter(size), fullname))
total_size += size
elif os.path.isfile(direntry):
size = os.stat(direntry).st_size
total_size += size
retval.append("%-8s %s" % (size_formatter(size), direntry))
retval.append(size_formatter(total_size))
return "\n".join(retval)
def echo(*args):
return " ".join(args)
def env():
"""env
print environment variables"""
rv = []
for key in sorted(_g_pyenv.keys()):
rv.append("%s=%s" % (key, repr(_g_pyenv[key])))
return "\n".join(rv)
def expand(*filenames, **kwargs):
accept_pipe = kwargs.get("accept_pipe", True)
min_count = kwargs.get("min", 0)
must_exist = kwargs.get("exist", False)
rv = []
if not filenames:
if accept_pipe and _g_pipe_has_data:
rv.append(_g_pipe_filename)
else:
for pattern in filenames:
extends_to = glob.glob(pattern)
if extends_to:
for filepath in extends_to:
if "/" in filepath and "\\" in filepath:
filepath = filepath.replace('\\', '/')
rv.append(filepath)
elif not must_exist:
rv.append(pattern)
if not min_count <= len(rv):
raise ValueError("expected at least %s file(s), got %s" %
(min_count, len(rv)))
return "\n".join(rv)
def export(assignment):
"""export VAR=VALUE
assign VALUE to environment variable VAR"""
if not "=" in assignment or not assignment.split("=")[0].strip():
raise ValueError("expected VAR=VALUE")
_g_pyenv.__setitem__(*assignment.split("=", 1))
return ""
def grep(*args):
"""grep [-iH] PATTERN [FILE...]
show matching lines in file(s)"""
opts, pattern_filenames = _getopts(args, "iH")
ignore_case = "-i" in opts
always_print_filename = "-H" in opts
try:
pattern = pattern_filenames[0]
filenames = pattern_filenames[1:]
except:
raise ValueError("grep pattern missing")
if ignore_case:
pattern = pattern.lower()
matching_lines = []
all_files = expand(*filenames).splitlines()
if len(all_files) > 1:
always_print_filename = True
prefix = ""
for filename in all_files:
if always_print_filename:
prefix = filename.replace("\\", "/") + ": "
if os.path.isdir(filename):
matching_lines.append("grep: %s: is a directory\n" % (filename,))
continue
try:
for line in file(filename).xreadlines():
if ((not ignore_case and pattern in line) or
(ignore_case and pattern in line.lower())):
matching_lines.append(prefix + line)
except IOError, e:
matching_lines.append("grep: %s: %s\n" % (filename, e))
return "".join(matching_lines)
def head(*args):
"""head [-n NUM] [FILE...]
show first NUM lines in file(s)"""
opts, filenames = _getopts(args, "n:")
all_files = expand(*filenames).splitlines()
if "-n" in opts:
lines = int(opts["-n"])
else:
lines = 10
rv = []
for filename in all_files:
line_count = 0
for line in file(filename).xreadlines():
line_count += 1
if line_count > lines:
break
rv.append(line)
return "".join(rv)
def help(func=None):
"""help [COMMAND]
print help (on COMMAND)"""
if not func:
rv = []
for c in globals().keys():
if c.startswith("_"):
continue
if not isinstance(globals()[c], types.FunctionType):
continue
if not globals()[c].__doc__:
continue
if len(globals()[c].__doc__.splitlines()) != 2:
continue
rv.append("%-26s%s" %
tuple([l.strip() for l in
globals()[c].__doc__.splitlines()]))
rv = sorted(rv)
elif isinstance(globals().get(func, None), types.FunctionType):
rv = inspect.getsource(globals().get(func)).splitlines()
return "\n".join(rv)
def kill(*pids):
"""kill PID...
terminate processes"""
for pid in pids:
os.kill(int(pid), signal.SIGTERM)
return ""
def ls(*args):
"""ls [-ld]
list files on current working directory"""
opts, filenames = _getopts(args, "ld")
files = []
if filenames:
for filename in expand(*filenames, exist=True).splitlines():
if os.path.isdir(filename) and not "-d" in opts:
root, subdirs, subfiles = os.walk(filename).next()
root = root.replace('\\', '/')
files.extend(sorted([root + "/" + d + "/" for d in subdirs]) +
sorted([root + "/" + f for f in subfiles]))
else:
files.append(filename)
else:
_, subdirs, files = os.walk(".").next()
files = sorted([d + "/" for d in subdirs]) + sorted(files)
files_outnames = []
for f in files:
if f.endswith("/"):
outname = os.path.basename(f[:-1]) + "/"
else:
outname = os.path.basename(f)
files_outnames.append((f, outname))
if "-l" in opts:
rv = []
for f, o in files_outnames:
fstat = os.stat(f)
rv.append("%10s %s %s" % (
fstat.st_size,
time.strftime("%Y-%m-%d %H:%M", time.localtime(fstat.st_mtime)),
o))
else:
rv = [o for f, o in files_outnames]
return "\n".join(rv)
def nl(*filenames):
"""nl FILE...
number lines"""
all_files = expand(*filenames).splitlines()
rv = []
line_no = 0
for filename in all_files:
for line in file(filename).xreadlines():
line_no += 1
rv.append("%5s %s" % (line_no, line))
return "".join(rv)
def mkdir(*args):
"""mkdir [-p] DIRNAME...
make directories, -p: intermediates if necessary"""
args, dirnames = _getopts(args, "-p")
for dirname in dirnames:
if "-p" in args:
os.makedirs(dirname)
else:
os.mkdir(dirname)
return ""
def redir(dst_filename):
# redirect data from input pipe to a file
src_filename = expand(accept_pipe=True)
if src_filename:
file(dst_filename, "wb").write(
file(src_filename, "rb").read())
return ""
def rm(*args):
"""rm [-r] FILE...
remove file"""
args, filenames = _getopts(args, "rf")
filenames = expand(*filenames, accept_pipe=False, min=1).splitlines()
for filename in filenames:
if "-r" in args and os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
return ""
def rmdir(dirname):
"""rmdir DIRNAME
remove directory"""
os.rmdir(dirname)
return ""
def cat(*filenames):
"""cat FILE...
concatenate contents of listed files"""
return "".join([_file(f).read() for f in expand(*filenames).splitlines()])
def df(*args):
"""df [-h] DIRNAME
print [human readable] free space on DIRNAME"""
args, dirnames = _getopts(args, "-h")
if "-h" in args:
human_readable = True
else:
human_readable = False
try:
dirname = dirnames[0]
except IndexError:
raise Exception("directory name missing")
if os.name == "nt": # Windows
cfree = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(dirname), None, None,
ctypes.byref(cfree))
free = cfree.value
else: # Posix
st = os.statvfs(dirname)
free = st.f_bavail * st.f_frsize
if human_readable:
retval = _human_readable_size(free)
else:
retval = str(free)
return retval
def md5sum(*filenames):
"""md5sum FILE...
print MD5 (128-bit) checksums."""
rv = []
for filename in expand(*filenames).splitlines():
rv.append("%-34s%s" %
(md5.md5(file(filename, "rb").read()).hexdigest(),
filename))
return "\n".join(rv)
def mv(src, dst):
"""mv SOURCE DEST
move file or directory to destination"""
shutil.move(src, dst)
return ""
def cp(src, dst):
"""cp SOURCE DEST
copy file or directory to destination"""
shutil.copy(src, dst)
return ""
def pipe(expr_left, expr_right):
global _g_pipe_has_data
try:
pipe_in_data = eval(expr_left)
file(_g_pipe_filename, "wb").write(pipe_in_data)
del pipe_in_data
_g_pipe_has_data = True
rv = eval(expr_right)
finally:
try:
os.remove(_g_pipe_filename)
except Exception:
pass
_g_pipe_has_data = False
return rv
def cat_eval(expr_left, expr_right):
left_data = eval(expr_left)
right_data = eval(expr_right)
return left_data + right_data
def ps(*args):
"""ps [-v] [PID...]
list processes (-v virtual memory)"""
args, pids = _getopts(args, "-v")
rv = []
pids = set(pids)
if os.name == "nt":
if "-v" in args:
opt_field = "PageFileUsage"
else:
opt_field = "parentprocessid"
_, o, _ = _shell_soe(
"wmic process get %s,processid,description,commandline" %
(opt_field,))
for line in o.splitlines():
try:
cmd_desc_almostlast, lastfield = line.rstrip().rsplit(" ", 1)
cmd_desc, almostlast = (
cmd_desc_almostlast.rstrip().rsplit(" ", 1))
cmd, desc = cmd_desc.rstrip().rsplit(" ", 1)
if opt_field == "PageFileUsage":
pid = lastfield
pdata = almostlast # PageFileUsage
else:
pid = lastfield
pdata = almostlast # parent pid
if not desc.lower() in cmd.strip().lower():
cmd = "[%s] %s" % (desc.strip(), cmd.strip())
if not pids or pid in pids:
rv.append("%8s %8s %s" %
(pid.strip(), pdata.strip(), cmd.strip()))
except Exception:
pass
else:
if "-v" in args:
opt_field = "size"
else:
opt_field = "ppid"
_, o, _ = _shell_soe("ps ax -o%s,pid,cmd" % (opt_field,))
for line in o.splitlines():
pdata, pid, cmd = line.strip().split(None, 2)
if not pids or pid in pids:
rv.append("%8s %8s %s" % (pid, pdata, cmd.strip()))
return "\n".join(rv)
def psh(*cmd):
"""psh COMMAND
run COMMAND in powershell (Windows)"""
_, o, e = _shell_soe(
("powershell.exe",) + cmd)
return o + e
_g_pspycosh_conn = None
def pspycosh(psconn, *cmdlines):
"""pspycosh CONNSPEC [CMD...]
open remote pycosh shell or run CMDs on it"""
global _g_pspycosh_conn
if isinstance(psconn, pythonshare.client.Connection):
_g_pspycosh_conn = psconn
close_connection = False
else:
_g_pspycosh_conn = pythonshare.connect(psconn)
close_connection = True
_g_pspycosh_conn.exec_(_g_pycosh_source)
if cmdlines:
rv = []
try:
for cmdline in cmdlines:
rv.append(pycosh_eval(cmdline))
finally:
if close_connection:
_g_pspycosh_conn.close()
_g_pspycosh_conn = None
return "".join(rv)
return ""
def _psput_file(conn, src_filename, dst_filename):
data = file(src_filename, "rb").read()
conn.eval_('file(%s, "wb").write(base64.b64decode(%s))' %
(repr(dst_filename),
repr(base64.b64encode(data))))
def _psput_dir(conn, dirname, dest_dir):
rv = []
dirname = dirname.replace("\\", "/")
dir_dest_dir = dest_dir.replace("\\", "/") + "/" + os.path.basename(dirname)
for root, dirs, files in os.walk(dirname):
file_src_dir = root.replace('\\', '/')
if file_src_dir[len(dirname):]:
file_dest_dir = (dir_dest_dir + "/" + file_src_dir[len(dirname):])
else:
file_dest_dir = dir_dest_dir
try:
conn.eval_('os.makedirs(%r)' % (file_dest_dir,))
except:
pass
for f in files:
_psput_file(conn,
file_src_dir + "/" + f,
file_dest_dir + "/" + f)
rv.append(file_src_dir + "/" + f)
return rv
def psput(psconn, pattern):
"""psput CONNSPEC[//DEST] FILE...
upload files to pythonshare server"""
# Examples:
# Put files to current working directory on host:
# psput passwd@host:port files
# Put localdir under cwd/relative/path on host:
# psput passwd@host:port//relative/path localdir
# Put localdir under /abs/path on host on Linux host:
# psput passwd@host:port///abs/path localdir
# Put localdir under c:/abs/winpath on Windows host:
# psput passwd@host:port//c:/abs/winpath localdir
# Put localdir to /abs/path on Linux host via hub/namespace:
# psput passwd@hub:port/namespace///abs/path localdir
# Check cwd on host:
# pspycosh passwd@host:port pwd
if isinstance(psconn, pythonshare.client.Connection):
dest_dir = "."
conn = psconn
close_connection = False
else:
if "//" in psconn:
psconn, dest_dir = psconn.split("//", 1)
else:
dest_dir = "."
conn = pythonshare.connect(psconn)
close_connection = True
conn.exec_("import base64, os")
rv = []
for filename in expand(pattern, accept_pipe=False).splitlines():
if os.path.isdir(filename):
rv.extend(_psput_dir(conn, filename, dest_dir))
else:
_psput_file(conn, filename, dest_dir + "/" + os.path.basename(filename))
rv.append(filename)
if close_connection:
conn.close()
return "\n".join(rv)
def psget(psconn, pattern):
"""psget CONNSPEC FILE...
download files from pythonshare server"""
# Get *.txt from host to current working directory:
# psget passwd@host:port *.txt
# Get * from host via hub to current working directory:
# psget passwd@hub/host *
# Get * from HOSTDIR on host, via hub, to current working directory:
# psget passwd@hub/host//HOSTDIR *
if isinstance(psconn, pythonshare.client.Connection):
conn = psconn
remotedir = ""
close_connection = False
elif "//" in psconn:
hostspec, remotedir = psconn.split("//", 1)
conn = pythonshare.connect(hostspec)
close_connection = True
else:
remotedir = ""
conn = pythonshare.connect(psconn)
close_connection = True
conn.exec_("".join(inspect.getsourcelines(expand)[0]))
conn.exec_("import glob")
if remotedir:
remotedir = remotedir.replace("\\", "/")
if not remotedir.endswith("/"):
remotedir = remotedir + "/"
rv = []
for filename in conn.eval_('expand(%s, accept_pipe=False)' %
repr(remotedir + pattern)).splitlines():
try:
data = conn.eval_("file(%r, 'rb').read()" % (filename,))
except:
rv.append("! error reading %r" % (filename,))
continue
file(os.path.basename(filename), "wb").write(data)
rv.append(filename)
return "\n".join(rv)
def pwd():
"""pwd
print current working directory"""
return os.getcwd().replace("\\", "/")
def pye(*code):
"""pye CODE
evaluate Python CODE"""
code = " ".join(code)
if _g_pipe_has_data:
_g_pyenv["pipe_in"] = file(expand(accept_pipe=True), "rb")
try:
return str(eval(code, globals(), _g_pyenv))
finally:
if "pipe_in" in _g_pyenv:
del _g_pyenv["pipe_in"]
def pyx(*code):
"""pyx CODE
execute Python CODE"""
code = " ".join(code)
if _g_pipe_has_data:
_g_pyenv["pipe_in"] = file(expand(accept_pipe=True), "rb")
try:
try:
exec code in globals(), _g_pyenv
except Exception, e:
return str(e)
finally:
if "pipe_in" in _g_pyenv:
del _g_pyenv["pipe_in"]
return ""
def sed(cmd, *filenames):
"""sed s/P/R[/N] [FILE]
replace P with R in FILE"""
rv = []
try:
pattern, repl, count = re.findall("s/([^/]*)/([^/]*)/(.*)", cmd)[0]
pattern = re.compile(pattern)
except:
raise ValueError('invalid command "%s"' % (cmd,))
all_files = expand(*filenames).splitlines()
for filename in all_files:
for line in file(filename).readlines():
try:
count_arg = (int(count),)
except:
if count == "g":
count_arg = ()
elif count == "":
count_arg = (1,)
else:
raise ValueError('invalid count: "%s"' % (count,))
rv.append(re.subn(* ((pattern, repl, line) + count_arg))[0])
return "".join(rv)
def sh(*cmd):
"""sh COMMAND
run COMMAND in shell"""
s, o, e = _shell_soe(" ".join(cmd))
return "[exit status: %s]\n%s" % (s, o+e)
def sleep(seconds):
"""sleep SECONDS
sleep for SECONDS (float)"""
time.sleep(float(seconds))
return ""
def sort(*args):
"""sort [-n] [-k N] [FILE]
sort lines [numerically] according to column N"""
opts, filenames = _getopts(args, "k:n")
filenames = expand(*filenames, accept_pipe=True).splitlines()
rv = []
for filename in filenames:
lines = [[l.split(), l] for l in file(filename).readlines()]
if "-k" in opts:
k = int(opts["-k"]) - 1
for line in lines:
line[0][0], line[0][k] = line[0][k], line[0][0]
if "-n" in opts:
for line in lines:
try:
line[0][0] = int(line[0][0])
except:
pass
lines.sort()
rv.extend([line[1] for line in lines])
return "".join(rv)
def sync():
"""sync
flush system write back caches"""
if os.name == "nt":
retval = str(ctypes.windll.kernel32.SetSystemFileCacheSize(-1, -1, 0))
else:
_, _, retval = _shell_soe("sync")
return retval
def tail(*args):
"""tail [-n NUM] [FILE...]
show last NUM lines in file(s)"""
opts, filenames = _getopts(args, "n:")
all_files = expand(*filenames).splitlines()
if "-n" in opts:
lines = int(opts["-n"])
else:
lines = 10
rv = []
if lines > 0:
for filename in all_files:
rv.extend(file(filename).readlines()[-lines:])
return "".join(rv)
def tar(*args):
"""tar [-ctxfC] PKG [FILE...]
create/list/extract a tar package"""
opts, filenames = _getopts(args, "ctxf:C:")
pkg = opts.get("-f", expand(accept_pipe=True))
if not pkg:
raise ValueError("package filename missing (-f)")
rv = []
if "-t" in opts:
tf = tarfile.TarFile.open(pkg)
rv.extend(tf.getnames())
elif "-x" in opts:
tf = tarfile.TarFile.open(pkg)
filenames = expand(*filenames, accept_pipe=False).splitlines()
if filenames:
for filename in filenames:
tf.extract(filename, path=opts.get('-C', os.getcwd()))
rv.append(filename)
else:
tf.extractall(path=opts.get('-C', os.getcwd()))
elif "-c" in opts:
if pkg.endswith(".bz2"):
mode = "w:bz2"
elif pkg.endswith(".gz"):
mode = "w:gz"
else:
mode = "w"
tf = tarfile.TarFile.open(pkg, mode)
filenames = expand(*filenames, accept_pipe=False).splitlines()
for filename in filenames:
tf.add(filename)
tf.close()
return "\n".join(rv)
def unzip(*args):
"""unzip [-l] [-d DEST] PKG [FILE...]
extract all or FILEs from PKG to DEST"""
opts, filenames = _getopts(args, "ld:")
filenames = expand(*filenames, min=1).splitlines()
rv = []
if "-d" in opts:
dest_dir = opts["-d"]
else:
dest_dir = os.getcwd()
if "-l" in opts:
# only list files in archive
for filename in filenames:
for zi in zipfile.ZipFile(filename).infolist():
rv.append("%8s %s %s" % (
zi.file_size,
datetime.datetime(*zi.date_time).strftime("%Y-%m-%d %H:%M"),
zi.filename))
else:
pkg, extract_files = filenames[0], filenames[1:]
zf = zipfile.ZipFile(pkg)
if extract_files:
for extract_file in extract_files:
zf.extract(extract_file,path=dest_dir)
rv.append(extract_file)
else:
zf.extractall(path=dest_dir)
rv.extend(zf.namelist())
return "\n".join(rv)
def whoami():
"""whoami
print user name"""
try:
return getpass.getuser()
except Exception:
return ""
def xargs(*args):
"""xargs CMD
run CMD with args from stdin"""
if not args:
raise ValueError("xargs: CMD missing")
if not _g_pipe_has_data:
raise ValueError("xargs: no get arguments in pipe")
retval = []
for arg in open(_g_pipe_filename):
arg = arg.strip()
funccall = args[0] + repr(tuple(args[1:]) + (arg,))
try:
func_rv = eval(funccall)
if func_rv and not func_rv.endswith("\n"):
func_rv += "\n"
retval.append(func_rv)
except Exception, e:
retval.append(str(e).splitlines()[-1] + "\n")
return "".join(retval)
def xxd(*args):
"""xxd [FILE...]
make a hexdump"""
all_files = expand(*args).splitlines()
rv = []
for filename in all_files:
addr = 0
f = file(filename, "rb")
while True:
data16 = f.read(16)
if len(data16) == 0:
break
hex_line = []
hex_line.append(("%x" % (addr,)).zfill(8) + ": ")
for bindex, byte in enumerate(data16):
hex_line.append(("%x" % (ord(byte),)).zfill(2))
if bindex & 1 == 1:
hex_line.append(" ")
s_hex_line = "".join(hex_line)
s_hex_line += " " * (51 - len(s_hex_line))
ascii_line = []
for byte in data16:
if 32 <= ord(byte) <= 126:
ascii_line.append(byte)
else:
ascii_line.append(".")
rv.append(s_hex_line + "".join(ascii_line))
addr += 16
return "\n".join(rv)
def zip(zipfilename, *filenames):
"""zip ZIPFILE [FILE...]
add FILEs to ZIPFILE"""
filenames = expand(*filenames, accept_pipe=False, min=1).splitlines()
zf = zipfile.ZipFile(zipfilename, "a")
for filename in filenames:
zf.write(filename)
zf.close()
return ""
def exit():
if os.name == "nt":
raise Exception("Close connection with Ctrl-Z + Return")
else:
raise Exception("Close connection with Ctrl-D")
def pycosh_eval(cmdline):
if _g_pspycosh_conn:
return _g_pspycosh_conn.eval_("pycosh_eval(%s)" % (repr(cmdline,)))
funccall = cmd2py(cmdline)
try:
retval = eval(funccall)
except Exception, e:
retval = str(e).splitlines()[-1]
return retval
def _parse_pycoshrc(contents):
"""returns rc settings in a dictionary"""
for line in contents.splitlines():
retval = str(pycosh_eval(line))
if retval:
_output(retval)
def _main():
histfile = os.path.join(os.path.expanduser("~"), ".pycosh_history")
rcfile = os.path.join(os.path.expanduser("~"), ".pycoshrc")
try:
rccontents = open(rcfile).read()
except:
rccontents = None
if rccontents:
_parse_pycoshrc(rccontents)
try:
import readline
try:
readline.read_history_file(histfile)
except IOError:
pass
atexit.register(readline.write_history_file, histfile)
except (ImportError, IOError):
pass
while True:
try:
cmdline = raw_input("\n" + pycosh_eval("prompt"))
except EOFError:
cmdline = None
if cmdline == None:
break
else:
cmdline = cmdline.replace("\\", "\\\\")
if cmdline.strip() == "":
retval = ""
else: # run cmdline
retval = pycosh_eval(cmdline)
_output(str(retval))
if "__file__" in globals() and "pycosh.py" in __file__:
if __file__.endswith("pycosh.py"):
_g_pycosh_source = open(__file__, "r").read()
elif __file__.endswith("pycosh.pyc"):
try:
_g_pycosh_source = open(__file__[:-1], "r").read()
except:
pass
if "_g_pycosh_source" in globals():
_g_pycosh_source = "_g_pycosh_source = %s\n%s" % (repr(_g_pycosh_source), _g_pycosh_source)
if __name__ == "__main__":
if len(sys.argv) == 1:
_main()
else:
for cmdline in sys.argv[1:]:
if cmdline != "interactive":
_output(pycosh_eval(cmdline))
else:
_main()
pycosh: osenv added
#!/usr/bin/env python2
# fMBT, free Model Based Testing tool
# Copyright (c) 2016-2018, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
# This library implements pico-sized multiplatform shell
"""Pico-sized multiplatform shell
"""
# pylint: disable = redefined-builtin, eval-used, exec-used, invalid-name
# pylint: disable = missing-docstring, global-statement, unneeded-not
# pylint: disable = bare-except, broad-except, too-many-branches
# pylint: disable = singleton-comparison
import atexit
import base64
import datetime
import difflib
import fnmatch
import getopt
import getpass
import glob
import inspect
import md5
import os
import re
import shlex
import shutil
import signal
import socket
import subprocess
import sys
import tarfile
import time
import types
import urllib2
import zipfile
try:
import pythonshare
except ImportError:
pythonshare = None
try:
import recb
except ImportError:
recb = None
if os.name == "nt":
import ctypes
_g_pipe_filename = "pycosh.pipe.%s" % (os.getpid(),)
_g_pipe_has_data = False
_g_pyenv = {}
def _file(filename, mode="rb"):
try:
return file(filename, mode)
except IOError, e:
raise ValueError(str(e).replace(":", ""))
def _write_b64(filename, b64data):
file(filename, "wb").write(base64.b64decode(b64data))
def _getopts(args, shortopts, longopts=()):
try:
opts, remainder = getopt.gnu_getopt(args, shortopts, longopts)
except getopt.GetoptError, e:
raise Exception("Options: -%s (%s)" % (shortopts, e))
return dict(opts), remainder
def _human_readable_size(size):
scale = "BkMGTPEZY"
divisions = 0
while size >= 1000:
size = size / 1024.0
divisions += 1
return "%.1f%s" % (size, scale[divisions])
def _output(s):
sys.stdout.write(s)
sys.stdout.flush()
def _shell_soe(cmd):
try:
p = subprocess.Popen(cmd, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
status = p.returncode
except OSError:
status, out, err = None, None, None
if out != None and sys.stdout.encoding:
out = out.decode(sys.stdout.encoding).encode("utf-8")
if err != None and sys.stderr.encoding:
err = err.decode(sys.stderr.encoding).encode("utf-8")
return status, out, err
def cmd2py(cmdline):
if "|" in cmdline:
cmd_left, cmd_right = cmdline.split("|", 1)
funccall = "pipe(%s, %s)" % (repr(cmd2py(cmd_left)),
repr(cmd2py(cmd_right)))
elif ">" in cmdline:
cmd_left, cmd_right = cmdline.split(">", 1)
filenames = shlex.split(cmd_right)
if len(filenames) != 1:
raise ValueError('right side of > must have single filename')
funccall = "pipe(%s, %s)" % (
repr(cmd2py(cmd_left)),
repr("redir(%r)" % (filenames[0],)))
else:
cmdline_list = shlex.split(cmdline.strip())
funcname = cmdline_list[0]
args = cmdline_list[1:]
funccall = funcname + repr(tuple(args))
return funccall
if not recb is None:
_parsed_call = []
_parsed_calls = []
_parsed_oper = []
def _prog_args_parsed(*args):
_parsed_calls.append("%s%s" % (
_parsed_call[0], repr(tuple(_parsed_call[1:]))))
_parsed_call[:] = []
def _call_seq_parsed(*args):
if _parsed_oper:
for oper in _parsed_oper:
if oper == ";":
_parsed_calls[0:2] = ["cat_eval(%s, %s)" % (repr(_parsed_calls[0]), repr(_parsed_calls[1]))]
elif oper == "|":
_parsed_calls[0:2] = ["pipe(%s, %s)" % (repr(_parsed_calls[0]), repr(_parsed_calls[1]))]
_parsed_oper[:] = []
_PROG = recb.pattern(r"(?P<func>[a-zA-Z_0-9]+)\s*",
cb=lambda _1, m, _2: _parsed_call.append(m.groupdict()['func']))
_ARG_QUOTED = recb.pattern(r"'(?P<arg>[^']*)'\s*")
_ARG_DOUBLE_QUOTED = recb.pattern(r'"(?P<arg>[^"]*)"\s*')
_ARG_SPACE_SEP = recb.pattern(r"(?P<arg>[^\s;|()]+)\s*")
_CMD_CALL = (
_PROG + recb.many(
recb.any(_ARG_QUOTED, _ARG_DOUBLE_QUOTED, _ARG_SPACE_SEP).set_cb(
lambda _1, m, _2: _parsed_call.append(m[1].groupdict()['arg'])))
).set_ca(_prog_args_parsed)
_CMD_CONT = (recb.pattern(r"(?P<oper>[|;])\s*").set_cb(
lambda _1, m, _2: _parsed_oper.append(m.groupdict()['oper']))
+ _CMD_CALL)
_CMD_PIPE_SEQ = (_CMD_CALL + recb.many(_CMD_CONT)).set_ca(_call_seq_parsed)
_CMD_GROUP = (recb.pattern(r"\(\s*") +
_CMD_PIPE_SEQ +
recb.pattern(r"\)\s*") + recb.many(_CMD_CONT))
_CMD = (_CMD_GROUP | _CMD_PIPE_SEQ).set_patterns({'GROUP': _CMD_GROUP})
def _test_cmd2py_newparser(cmdline):
if recb is None:
raise ImportError('recb required')
_CMD.debug(interactive=False).parse("")
_CMD.debug(interactive=False).parse("ls 'i'")
_CMD.debug(interactive=False).parse('ls I "I"')
_CMD.debug(interactive=False).parse('echo I "i I" | grep i')
_CMD.debug(interactive=False).parse('echo I "i I" | grep I | grep i')
_CMD.debug(interactive=False).parse("(echo test.txt; ls) | grep '1 2 3'")
def cmd2py_newparser(cmdline):
_parsed_call[:] = []
_parsed_calls[:] = []
_parsed_oper[:] = []
_, unparsed = _CMD.parse(cmdline)
if unparsed:
view_chars = 10 # how many chars around syntax error is shown in error message
error_pos = len(cmdline) - len(unparsed) + 1
str_at_pos = (repr(cmdline[max(error_pos-view_chars, 0):error_pos]) +
"<--[error pos]" +
repr(cmdline[error_pos:min(len(cmdline)-1, error_pos+view_chars)]))
raise ValueError('syntax error at pos %s: %s' % (
len(cmdline) - len(unparsed),
str_at_pos))
if len(_parsed_calls) == 1:
return _parsed_calls[0]
else:
raise ValueError('parse error')
def prompt():
"""prompt
print prompt"""
try:
user = getpass.getuser()
except Exception:
user = ""
try:
hostname = socket.gethostname()
except Exception:
hostname = ""
return (user + "@" +
hostname + ":" +
os.getcwd().replace("\\", "/") + ": ")
def awk(prog, *args):
"""awk PROG [FILE...]
PROG syntax: [/REGEXP/]{print $N...}"""
filenames = expand(*args, accept_pipe=True).splitlines()
if not filenames:
raise ValueError("missing input")
rv = []
awk_syntax = re.compile('(/([^/]*)/)?\{([^}]*)\}')
parsed_prog = awk_syntax.match(prog)
if not parsed_prog:
raise ValueError('syntax error in awk program')
awk_pattern = parsed_prog.group(2)
if not awk_pattern is None:
awk_pattern_re = re.compile(awk_pattern)
else:
awk_pattern_re = re.compile("")
awk_statements = [s.strip() for s in parsed_prog.group(3).split(";")]
awk_fieldno_re = re.compile("\$([0-9]+)")
awk_fieldsep_re = re.compile("[ \n\t\r]*")
for filename in filenames:
for line in open(filename).xreadlines():
if awk_pattern_re.search(line):
for stmt in awk_statements:
if stmt.startswith("print"):
what = stmt[5:].strip()
if not what:
# plain "print" results in full line
what = "$0"
else:
# no variable handling for now...
what = what.replace('"', '')
fields = [int(n) for n in awk_fieldno_re.findall(what)]
translate = {}
if fields:
line_fields = [line.splitlines()[0]] + [
l for l in awk_fieldsep_re.split(line) if l]
for field in fields:
if field < len(line_fields):
translate["$" + str(field)] = line_fields[field]
else:
translate["$" + str(field)] = ""
for rep in reversed(sorted(translate.keys())):
# if not reversed, might replace $1 before $10
what = what.replace(rep, translate[rep])
rv.append(what)
return "\n".join(rv)
def cd(dirname):
"""cd DIRNAME
change current working directory"""
d = expand(dirname, accept_pipe=False, min=1, exist=True).splitlines()
if len(d) > 1:
raise ValueError("ambiguous directory name")
os.chdir(os.path.join(os.getcwd(), d[0]))
return ""
def curl(*args):
"""curl [-x P][-o FILE] URL
download URL (use proxy P), save to FILE"""
opts, urls = _getopts(args, "x:o:")
rv = []
if not urls:
raise ValueError("missing URL(s)")
if "-x" not in opts and "http_proxy" in _g_pyenv:
opts["-x"] = _g_pyenv.get("http_proxy")
if "-x" in opts:
proxy = urllib2.ProxyHandler({
'http': opts["-x"],
'https': opts["-x"],
'ftp': opts["-x"]})
else:
proxy = urllib2.ProxyHandler({})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
for url in urls:
data = urllib2.urlopen(url).read()
if "-o" in opts:
_file(opts["-o"], "a").write(data)
else:
rv.append(data)
return "".join(rv)
def find(*args):
"""find [-n NAME][-i][-t T][-p a] DIR...
find under DIR(s), see help(find)"""
# -n NAME: search entries matching wildcard pattern NAME
# -i: ignore case
# -t f: (type=file) match only files
# -t d: (type=dir) match only directories
# -p a: print absolute paths
opts, remainder = _getopts(args, "n:t:ip:")
dirnames = expand(*remainder, exist=True, accept_pipe=False).splitlines()
if not dirnames:
raise ValueError("missing DIR")
if "-n" in opts:
findname = opts["-n"]
else:
findname = "*"
if "-p" in opts:
if opts["-p"] == "a":
print_absolute_names = True
else:
raise ValueError("invalid print option -p %r, supported: 'a'" %
(opts["-p"],))
else:
print_absolute_names = False
if "-i" in opts:
ignore_case = True
else:
ignore_case = False
if "-t" in opts:
findtype = opts["-t"].lower()
if findtype not in ["f", "d"]:
raise ValueError("find type must be 'f' (file) or 'd' (directory)")
else:
findtype = None
rv = []
for dirname in dirnames:
dirname_ends_with_sep = dirname[-1] in ["/", "\\"]
slash_only = not "\\" in dirname
if slash_only:
sep = "/"
else:
sep = os.path.sep
# DIR + NAME forms a path without duplicate path separators:
# if (and only if) DIR ends with /, then NAME does not start with /
for root, dirs, files in os.walk(dirname):
if slash_only:
root = root.replace("\\", "/")
if findtype:
dirs_set = set(dirs)
files_set = set(files)
for name in dirs + files:
if ((ignore_case == False and fnmatch.fnmatch(name, findname)) or
(ignore_case == True and fnmatch.fnmatch(name.lower(), findname.lower()))):
if (findtype == "f" and name not in files_set):
continue # skip not-a-file from find -t f ...
elif (findtype == "d" and name not in dirs_set):
continue # skip not-a-dir from find -t d ...
if print_absolute_names:
rv.append(os.path.abspath(root + sep + name).replace('\\','/'))
else:
if root == dirname:
if dirname_ends_with_sep:
rv.append(name)
else:
rv.append(sep + name)
else:
rv.append(root[len(dirname):] + sep + name)
return "\n".join(rv)
def date():
"""date
print current date and time"""
t = datetime.datetime.now()
return t.strftime("%Y-%m-%d %H:%M:%S.") + str(t.microsecond)
def diff(*args):
"""diff FILE1 FILE2
print differences between two files"""
files = expand(*args).splitlines()
try:
file1, file2 = files
except Exception:
raise ValueError("exactly two files required")
lines1 = _file(file1).readlines()
lines2 = _file(file2).readlines()
udiff = difflib.unified_diff(lines1, lines2, file1, file2)
# append endlines to lines where it is missing
difflines = [l + ["", "\n"][l[-1] != "\n"] for l in udiff]
return "".join(difflines)
def du(*args):
"""du [-h] FILE...
print [human readable] disk usage of FILEs"""
opts, filenames = _getopts(args, "h")
if "-h" in opts:
size_formatter = _human_readable_size
else:
size_formatter = lambda size: str(size)
filenames = expand(*filenames, accept_pipe=False, min=1).splitlines()
total_size = 0
retval = []
for direntry in filenames:
size = None
if os.path.isdir(direntry):
for root, dirs, filelist in os.walk(direntry):
for filename in filelist:
fullname = os.path.join(root, filename)
size = os.stat(fullname).st_size
retval.append("%-8s %s" % (size_formatter(size), fullname))
total_size += size
elif os.path.isfile(direntry):
size = os.stat(direntry).st_size
total_size += size
retval.append("%-8s %s" % (size_formatter(size), direntry))
retval.append(size_formatter(total_size))
return "\n".join(retval)
def echo(*args):
return " ".join(args)
def env():
"""env
print environment variables"""
rv = []
for key in sorted(_g_pyenv.keys()):
rv.append("%s=%s" % (key, repr(_g_pyenv[key])))
return "\n".join(rv)
def expand(*filenames, **kwargs):
accept_pipe = kwargs.get("accept_pipe", True)
min_count = kwargs.get("min", 0)
must_exist = kwargs.get("exist", False)
rv = []
if not filenames:
if accept_pipe and _g_pipe_has_data:
rv.append(_g_pipe_filename)
else:
for pattern in filenames:
extends_to = glob.glob(pattern)
if extends_to:
for filepath in extends_to:
if "/" in filepath and "\\" in filepath:
filepath = filepath.replace('\\', '/')
rv.append(filepath)
elif not must_exist:
rv.append(pattern)
if not min_count <= len(rv):
raise ValueError("expected at least %s file(s), got %s" %
(min_count, len(rv)))
return "\n".join(rv)
def export(assignment):
"""export VAR=VALUE
assign VALUE to environment variable VAR"""
if not "=" in assignment or not assignment.split("=")[0].strip():
raise ValueError("expected VAR=VALUE")
_g_pyenv.__setitem__(*assignment.split("=", 1))
return ""
def grep(*args):
"""grep [-iH] PATTERN [FILE...]
show matching lines in file(s)"""
opts, pattern_filenames = _getopts(args, "iH")
ignore_case = "-i" in opts
always_print_filename = "-H" in opts
try:
pattern = pattern_filenames[0]
filenames = pattern_filenames[1:]
except:
raise ValueError("grep pattern missing")
if ignore_case:
pattern = pattern.lower()
matching_lines = []
all_files = expand(*filenames).splitlines()
if len(all_files) > 1:
always_print_filename = True
prefix = ""
for filename in all_files:
if always_print_filename:
prefix = filename.replace("\\", "/") + ": "
if os.path.isdir(filename):
matching_lines.append("grep: %s: is a directory\n" % (filename,))
continue
try:
for line in file(filename).xreadlines():
if ((not ignore_case and pattern in line) or
(ignore_case and pattern in line.lower())):
matching_lines.append(prefix + line)
except IOError, e:
matching_lines.append("grep: %s: %s\n" % (filename, e))
return "".join(matching_lines)
def head(*args):
"""head [-n NUM] [FILE...]
show first NUM lines in file(s)"""
opts, filenames = _getopts(args, "n:")
all_files = expand(*filenames).splitlines()
if "-n" in opts:
lines = int(opts["-n"])
else:
lines = 10
rv = []
for filename in all_files:
line_count = 0
for line in file(filename).xreadlines():
line_count += 1
if line_count > lines:
break
rv.append(line)
return "".join(rv)
def help(func=None):
"""help [COMMAND]
print help (on COMMAND)"""
if not func:
rv = []
for c in globals().keys():
if c.startswith("_"):
continue
if not isinstance(globals()[c], types.FunctionType):
continue
if not globals()[c].__doc__:
continue
if len(globals()[c].__doc__.splitlines()) != 2:
continue
rv.append("%-26s%s" %
tuple([l.strip() for l in
globals()[c].__doc__.splitlines()]))
rv = sorted(rv)
elif isinstance(globals().get(func, None), types.FunctionType):
rv = inspect.getsource(globals().get(func)).splitlines()
return "\n".join(rv)
def kill(*pids):
"""kill PID...
terminate processes"""
for pid in pids:
os.kill(int(pid), signal.SIGTERM)
return ""
def ls(*args):
"""ls [-ld]
list files on current working directory"""
opts, filenames = _getopts(args, "ld")
files = []
if filenames:
for filename in expand(*filenames, exist=True).splitlines():
if os.path.isdir(filename) and not "-d" in opts:
root, subdirs, subfiles = os.walk(filename).next()
root = root.replace('\\', '/')
files.extend(sorted([root + "/" + d + "/" for d in subdirs]) +
sorted([root + "/" + f for f in subfiles]))
else:
files.append(filename)
else:
_, subdirs, files = os.walk(".").next()
files = sorted([d + "/" for d in subdirs]) + sorted(files)
files_outnames = []
for f in files:
if f.endswith("/"):
outname = os.path.basename(f[:-1]) + "/"
else:
outname = os.path.basename(f)
files_outnames.append((f, outname))
if "-l" in opts:
rv = []
for f, o in files_outnames:
fstat = os.stat(f)
rv.append("%10s %s %s" % (
fstat.st_size,
time.strftime("%Y-%m-%d %H:%M", time.localtime(fstat.st_mtime)),
o))
else:
rv = [o for f, o in files_outnames]
return "\n".join(rv)
def nl(*filenames):
"""nl FILE...
number lines"""
all_files = expand(*filenames).splitlines()
rv = []
line_no = 0
for filename in all_files:
for line in file(filename).xreadlines():
line_no += 1
rv.append("%5s %s" % (line_no, line))
return "".join(rv)
def mkdir(*args):
"""mkdir [-p] DIRNAME...
make directories, -p: intermediates if necessary"""
args, dirnames = _getopts(args, "-p")
for dirname in dirnames:
if "-p" in args:
os.makedirs(dirname)
else:
os.mkdir(dirname)
return ""
def redir(dst_filename):
# redirect data from input pipe to a file
src_filename = expand(accept_pipe=True)
if src_filename:
file(dst_filename, "wb").write(
file(src_filename, "rb").read())
return ""
def rm(*args):
"""rm [-r] FILE...
remove file"""
args, filenames = _getopts(args, "rf")
filenames = expand(*filenames, accept_pipe=False, min=1).splitlines()
for filename in filenames:
if "-r" in args and os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
return ""
def rmdir(dirname):
"""rmdir DIRNAME
remove directory"""
os.rmdir(dirname)
return ""
def cat(*filenames):
"""cat FILE...
concatenate contents of listed files"""
return "".join([_file(f).read() for f in expand(*filenames).splitlines()])
def df(*args):
"""df [-h] DIRNAME
print [human readable] free space on DIRNAME"""
args, dirnames = _getopts(args, "-h")
if "-h" in args:
human_readable = True
else:
human_readable = False
try:
dirname = dirnames[0]
except IndexError:
raise Exception("directory name missing")
if os.name == "nt": # Windows
cfree = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(dirname), None, None,
ctypes.byref(cfree))
free = cfree.value
else: # Posix
st = os.statvfs(dirname)
free = st.f_bavail * st.f_frsize
if human_readable:
retval = _human_readable_size(free)
else:
retval = str(free)
return retval
def md5sum(*filenames):
"""md5sum FILE...
print MD5 (128-bit) checksums."""
rv = []
for filename in expand(*filenames).splitlines():
rv.append("%-34s%s" %
(md5.md5(file(filename, "rb").read()).hexdigest(),
filename))
return "\n".join(rv)
def mv(src, dst):
"""mv SOURCE DEST
move file or directory to destination"""
shutil.move(src, dst)
return ""
def cp(src, dst):
"""cp SOURCE DEST
copy file or directory to destination"""
shutil.copy(src, dst)
return ""
def pipe(expr_left, expr_right):
global _g_pipe_has_data
try:
pipe_in_data = eval(expr_left)
file(_g_pipe_filename, "wb").write(pipe_in_data)
del pipe_in_data
_g_pipe_has_data = True
rv = eval(expr_right)
finally:
try:
os.remove(_g_pipe_filename)
except Exception:
pass
_g_pipe_has_data = False
return rv
def cat_eval(expr_left, expr_right):
left_data = eval(expr_left)
right_data = eval(expr_right)
return left_data + right_data
def osenv():
"""osenv
print operating system environment variables"""
rv = []
for key in sorted(os.environ.keys()):
rv.append("%s=%r" % (key, os.environ[key]))
return "\n".join(rv)
def ps(*args):
"""ps [-v] [PID...]
list processes (-v virtual memory)"""
args, pids = _getopts(args, "-v")
rv = []
pids = set(pids)
if os.name == "nt":
if "-v" in args:
opt_field = "PageFileUsage"
else:
opt_field = "parentprocessid"
_, o, _ = _shell_soe(
"wmic process get %s,processid,description,commandline" %
(opt_field,))
for line in o.splitlines():
try:
cmd_desc_almostlast, lastfield = line.rstrip().rsplit(" ", 1)
cmd_desc, almostlast = (
cmd_desc_almostlast.rstrip().rsplit(" ", 1))
cmd, desc = cmd_desc.rstrip().rsplit(" ", 1)
if opt_field == "PageFileUsage":
pid = lastfield
pdata = almostlast # PageFileUsage
else:
pid = lastfield
pdata = almostlast # parent pid
if not desc.lower() in cmd.strip().lower():
cmd = "[%s] %s" % (desc.strip(), cmd.strip())
if not pids or pid in pids:
rv.append("%8s %8s %s" %
(pid.strip(), pdata.strip(), cmd.strip()))
except Exception:
pass
else:
if "-v" in args:
opt_field = "size"
else:
opt_field = "ppid"
_, o, _ = _shell_soe("ps ax -o%s,pid,cmd" % (opt_field,))
for line in o.splitlines():
pdata, pid, cmd = line.strip().split(None, 2)
if not pids or pid in pids:
rv.append("%8s %8s %s" % (pid, pdata, cmd.strip()))
return "\n".join(rv)
def psh(*cmd):
"""psh COMMAND
run COMMAND in powershell (Windows)"""
_, o, e = _shell_soe(
("powershell.exe",) + cmd)
return o + e
_g_pspycosh_conn = None
def pspycosh(psconn, *cmdlines):
"""pspycosh CONNSPEC [CMD...]
open remote pycosh shell or run CMDs on it"""
global _g_pspycosh_conn
if isinstance(psconn, pythonshare.client.Connection):
_g_pspycosh_conn = psconn
close_connection = False
else:
_g_pspycosh_conn = pythonshare.connect(psconn)
close_connection = True
_g_pspycosh_conn.exec_(_g_pycosh_source)
if cmdlines:
rv = []
try:
for cmdline in cmdlines:
rv.append(pycosh_eval(cmdline))
finally:
if close_connection:
_g_pspycosh_conn.close()
_g_pspycosh_conn = None
return "".join(rv)
return ""
def _psput_file(conn, src_filename, dst_filename):
data = file(src_filename, "rb").read()
conn.eval_('file(%s, "wb").write(base64.b64decode(%s))' %
(repr(dst_filename),
repr(base64.b64encode(data))))
def _psput_dir(conn, dirname, dest_dir):
rv = []
dirname = dirname.replace("\\", "/")
dir_dest_dir = dest_dir.replace("\\", "/") + "/" + os.path.basename(dirname)
for root, dirs, files in os.walk(dirname):
file_src_dir = root.replace('\\', '/')
if file_src_dir[len(dirname):]:
file_dest_dir = (dir_dest_dir + "/" + file_src_dir[len(dirname):])
else:
file_dest_dir = dir_dest_dir
try:
conn.eval_('os.makedirs(%r)' % (file_dest_dir,))
except:
pass
for f in files:
_psput_file(conn,
file_src_dir + "/" + f,
file_dest_dir + "/" + f)
rv.append(file_src_dir + "/" + f)
return rv
def psput(psconn, pattern):
"""psput CONNSPEC[//DEST] FILE...
upload files to pythonshare server"""
# Examples:
# Put files to current working directory on host:
# psput passwd@host:port files
# Put localdir under cwd/relative/path on host:
# psput passwd@host:port//relative/path localdir
# Put localdir under /abs/path on host on Linux host:
# psput passwd@host:port///abs/path localdir
# Put localdir under c:/abs/winpath on Windows host:
# psput passwd@host:port//c:/abs/winpath localdir
# Put localdir to /abs/path on Linux host via hub/namespace:
# psput passwd@hub:port/namespace///abs/path localdir
# Check cwd on host:
# pspycosh passwd@host:port pwd
if isinstance(psconn, pythonshare.client.Connection):
dest_dir = "."
conn = psconn
close_connection = False
else:
if "//" in psconn:
psconn, dest_dir = psconn.split("//", 1)
else:
dest_dir = "."
conn = pythonshare.connect(psconn)
close_connection = True
conn.exec_("import base64, os")
rv = []
for filename in expand(pattern, accept_pipe=False).splitlines():
if os.path.isdir(filename):
rv.extend(_psput_dir(conn, filename, dest_dir))
else:
_psput_file(conn, filename, dest_dir + "/" + os.path.basename(filename))
rv.append(filename)
if close_connection:
conn.close()
return "\n".join(rv)
def psget(psconn, pattern):
"""psget CONNSPEC FILE...
download files from pythonshare server"""
# Get *.txt from host to current working directory:
# psget passwd@host:port *.txt
# Get * from host via hub to current working directory:
# psget passwd@hub/host *
# Get * from HOSTDIR on host, via hub, to current working directory:
# psget passwd@hub/host//HOSTDIR *
if isinstance(psconn, pythonshare.client.Connection):
conn = psconn
remotedir = ""
close_connection = False
elif "//" in psconn:
hostspec, remotedir = psconn.split("//", 1)
conn = pythonshare.connect(hostspec)
close_connection = True
else:
remotedir = ""
conn = pythonshare.connect(psconn)
close_connection = True
conn.exec_("".join(inspect.getsourcelines(expand)[0]))
conn.exec_("import glob")
if remotedir:
remotedir = remotedir.replace("\\", "/")
if not remotedir.endswith("/"):
remotedir = remotedir + "/"
rv = []
for filename in conn.eval_('expand(%s, accept_pipe=False)' %
repr(remotedir + pattern)).splitlines():
try:
data = conn.eval_("file(%r, 'rb').read()" % (filename,))
except:
rv.append("! error reading %r" % (filename,))
continue
file(os.path.basename(filename), "wb").write(data)
rv.append(filename)
return "\n".join(rv)
def pwd():
"""pwd
print current working directory"""
return os.getcwd().replace("\\", "/")
def pye(*code):
"""pye CODE
evaluate Python CODE"""
code = " ".join(code)
if _g_pipe_has_data:
_g_pyenv["pipe_in"] = file(expand(accept_pipe=True), "rb")
try:
return str(eval(code, globals(), _g_pyenv))
finally:
if "pipe_in" in _g_pyenv:
del _g_pyenv["pipe_in"]
def pyx(*code):
"""pyx CODE
execute Python CODE"""
code = " ".join(code)
if _g_pipe_has_data:
_g_pyenv["pipe_in"] = file(expand(accept_pipe=True), "rb")
try:
try:
exec code in globals(), _g_pyenv
except Exception, e:
return str(e)
finally:
if "pipe_in" in _g_pyenv:
del _g_pyenv["pipe_in"]
return ""
def sed(cmd, *filenames):
"""sed s/P/R[/N] [FILE]
replace P with R in FILE"""
rv = []
try:
pattern, repl, count = re.findall("s/([^/]*)/([^/]*)/(.*)", cmd)[0]
pattern = re.compile(pattern)
except:
raise ValueError('invalid command "%s"' % (cmd,))
all_files = expand(*filenames).splitlines()
for filename in all_files:
for line in file(filename).readlines():
try:
count_arg = (int(count),)
except:
if count == "g":
count_arg = ()
elif count == "":
count_arg = (1,)
else:
raise ValueError('invalid count: "%s"' % (count,))
rv.append(re.subn(* ((pattern, repl, line) + count_arg))[0])
return "".join(rv)
def sh(*cmd):
"""sh COMMAND
run COMMAND in shell"""
s, o, e = _shell_soe(" ".join(cmd))
return "[exit status: %s]\n%s" % (s, o+e)
def sleep(seconds):
"""sleep SECONDS
sleep for SECONDS (float)"""
time.sleep(float(seconds))
return ""
def sort(*args):
"""sort [-n] [-k N] [FILE]
sort lines [numerically] according to column N"""
opts, filenames = _getopts(args, "k:n")
filenames = expand(*filenames, accept_pipe=True).splitlines()
rv = []
for filename in filenames:
lines = [[l.split(), l] for l in file(filename).readlines()]
if "-k" in opts:
k = int(opts["-k"]) - 1
for line in lines:
line[0][0], line[0][k] = line[0][k], line[0][0]
if "-n" in opts:
for line in lines:
try:
line[0][0] = int(line[0][0])
except:
pass
lines.sort()
rv.extend([line[1] for line in lines])
return "".join(rv)
def sync():
"""sync
flush system write back caches"""
if os.name == "nt":
retval = str(ctypes.windll.kernel32.SetSystemFileCacheSize(-1, -1, 0))
else:
_, _, retval = _shell_soe("sync")
return retval
def tail(*args):
"""tail [-n NUM] [FILE...]
show last NUM lines in file(s)"""
opts, filenames = _getopts(args, "n:")
all_files = expand(*filenames).splitlines()
if "-n" in opts:
lines = int(opts["-n"])
else:
lines = 10
rv = []
if lines > 0:
for filename in all_files:
rv.extend(file(filename).readlines()[-lines:])
return "".join(rv)
def tar(*args):
"""tar [-ctxfC] PKG [FILE...]
create/list/extract a tar package"""
opts, filenames = _getopts(args, "ctxf:C:")
pkg = opts.get("-f", expand(accept_pipe=True))
if not pkg:
raise ValueError("package filename missing (-f)")
rv = []
if "-t" in opts:
tf = tarfile.TarFile.open(pkg)
rv.extend(tf.getnames())
elif "-x" in opts:
tf = tarfile.TarFile.open(pkg)
filenames = expand(*filenames, accept_pipe=False).splitlines()
if filenames:
for filename in filenames:
tf.extract(filename, path=opts.get('-C', os.getcwd()))
rv.append(filename)
else:
tf.extractall(path=opts.get('-C', os.getcwd()))
elif "-c" in opts:
if pkg.endswith(".bz2"):
mode = "w:bz2"
elif pkg.endswith(".gz"):
mode = "w:gz"
else:
mode = "w"
tf = tarfile.TarFile.open(pkg, mode)
filenames = expand(*filenames, accept_pipe=False).splitlines()
for filename in filenames:
tf.add(filename)
tf.close()
return "\n".join(rv)
def unzip(*args):
"""unzip [-l] [-d DEST] PKG [FILE...]
extract all or FILEs from PKG to DEST"""
opts, filenames = _getopts(args, "ld:")
filenames = expand(*filenames, min=1).splitlines()
rv = []
if "-d" in opts:
dest_dir = opts["-d"]
else:
dest_dir = os.getcwd()
if "-l" in opts:
# only list files in archive
for filename in filenames:
for zi in zipfile.ZipFile(filename).infolist():
rv.append("%8s %s %s" % (
zi.file_size,
datetime.datetime(*zi.date_time).strftime("%Y-%m-%d %H:%M"),
zi.filename))
else:
pkg, extract_files = filenames[0], filenames[1:]
zf = zipfile.ZipFile(pkg)
if extract_files:
for extract_file in extract_files:
zf.extract(extract_file,path=dest_dir)
rv.append(extract_file)
else:
zf.extractall(path=dest_dir)
rv.extend(zf.namelist())
return "\n".join(rv)
def whoami():
"""whoami
print user name"""
try:
return getpass.getuser()
except Exception:
return ""
def xargs(*args):
"""xargs CMD
run CMD with args from stdin"""
if not args:
raise ValueError("xargs: CMD missing")
if not _g_pipe_has_data:
raise ValueError("xargs: no get arguments in pipe")
retval = []
for arg in open(_g_pipe_filename):
arg = arg.strip()
funccall = args[0] + repr(tuple(args[1:]) + (arg,))
try:
func_rv = eval(funccall)
if func_rv and not func_rv.endswith("\n"):
func_rv += "\n"
retval.append(func_rv)
except Exception, e:
retval.append(str(e).splitlines()[-1] + "\n")
return "".join(retval)
def xxd(*args):
"""xxd [FILE...]
make a hexdump"""
all_files = expand(*args).splitlines()
rv = []
for filename in all_files:
addr = 0
f = file(filename, "rb")
while True:
data16 = f.read(16)
if len(data16) == 0:
break
hex_line = []
hex_line.append(("%x" % (addr,)).zfill(8) + ": ")
for bindex, byte in enumerate(data16):
hex_line.append(("%x" % (ord(byte),)).zfill(2))
if bindex & 1 == 1:
hex_line.append(" ")
s_hex_line = "".join(hex_line)
s_hex_line += " " * (51 - len(s_hex_line))
ascii_line = []
for byte in data16:
if 32 <= ord(byte) <= 126:
ascii_line.append(byte)
else:
ascii_line.append(".")
rv.append(s_hex_line + "".join(ascii_line))
addr += 16
return "\n".join(rv)
def zip(zipfilename, *filenames):
"""zip ZIPFILE [FILE...]
add FILEs to ZIPFILE"""
filenames = expand(*filenames, accept_pipe=False, min=1).splitlines()
zf = zipfile.ZipFile(zipfilename, "a")
for filename in filenames:
zf.write(filename)
zf.close()
return ""
def exit():
if os.name == "nt":
raise Exception("Close connection with Ctrl-Z + Return")
else:
raise Exception("Close connection with Ctrl-D")
def pycosh_eval(cmdline):
if _g_pspycosh_conn:
return _g_pspycosh_conn.eval_("pycosh_eval(%s)" % (repr(cmdline,)))
funccall = cmd2py(cmdline)
try:
retval = eval(funccall)
except Exception, e:
retval = str(e).splitlines()[-1]
return retval
def _parse_pycoshrc(contents):
"""returns rc settings in a dictionary"""
for line in contents.splitlines():
retval = str(pycosh_eval(line))
if retval:
_output(retval)
def _main():
histfile = os.path.join(os.path.expanduser("~"), ".pycosh_history")
rcfile = os.path.join(os.path.expanduser("~"), ".pycoshrc")
try:
rccontents = open(rcfile).read()
except:
rccontents = None
if rccontents:
_parse_pycoshrc(rccontents)
try:
import readline
try:
readline.read_history_file(histfile)
except IOError:
pass
atexit.register(readline.write_history_file, histfile)
except (ImportError, IOError):
pass
while True:
try:
cmdline = raw_input("\n" + pycosh_eval("prompt"))
except EOFError:
cmdline = None
if cmdline == None:
break
else:
cmdline = cmdline.replace("\\", "\\\\")
if cmdline.strip() == "":
retval = ""
else: # run cmdline
retval = pycosh_eval(cmdline)
_output(str(retval))
if "__file__" in globals() and "pycosh.py" in __file__:
if __file__.endswith("pycosh.py"):
_g_pycosh_source = open(__file__, "r").read()
elif __file__.endswith("pycosh.pyc"):
try:
_g_pycosh_source = open(__file__[:-1], "r").read()
except:
pass
if "_g_pycosh_source" in globals():
_g_pycosh_source = "_g_pycosh_source = %s\n%s" % (repr(_g_pycosh_source), _g_pycosh_source)
if __name__ == "__main__":
if len(sys.argv) == 1:
_main()
else:
for cmdline in sys.argv[1:]:
if cmdline != "interactive":
_output(pycosh_eval(cmdline))
else:
_main()
|
import datetime
import hashlib
import logging
import random
import time
import django
from captcha.conf import settings as captcha_settings
from django.db import models
from django.utils import timezone
if django.VERSION >= (3, 0):
from django.utils.encoding import smart_str as smart_text
else:
from django.utils.encoding import smart_text
# Heavily based on session key generation in Django
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, "SystemRandom"):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
MAX_RANDOM_KEY = 18446744073709551616 # 2 << 63
logger = logging.getLogger(__name__)
class CaptchaStore(models.Model):
challenge = models.CharField(blank=False, max_length=32)
response = models.CharField(blank=False, max_length=32)
hashkey = models.CharField(blank=False, max_length=40, unique=True)
expiration = models.DateTimeField(blank=False)
def save(self, *args, **kwargs):
self.response = self.response.lower()
if not self.expiration:
self.expiration = timezone.now() + datetime.timedelta(
minutes=int(captcha_settings.CAPTCHA_TIMEOUT)
)
if not self.hashkey:
key_ = (
smart_text(randrange(0, MAX_RANDOM_KEY))
+ smart_text(time.time())
+ smart_text(self.challenge, errors="ignore")
+ smart_text(self.response, errors="ignore")
).encode("utf8")
self.hashkey = hashlib.sha1(key_).hexdigest()
del key_
super(CaptchaStore, self).save(*args, **kwargs)
def __str__(self):
return self.challenge
def remove_expired(cls):
cls.objects.filter(expiration__lte=timezone.now()).delete()
remove_expired = classmethod(remove_expired)
@classmethod
def generate_key(cls, generator=None):
challenge, response = captcha_settings.get_challenge(generator)()
store = cls.objects.create(challenge=challenge, response=response)
return store.hashkey
@classmethod
def pick(cls):
if not captcha_settings.CAPTCHA_GET_FROM_POOL:
return cls.generate_key()
def fallback():
logger.error("Couldn't get a captcha from pool, generating")
return cls.generate_key()
# Pick up a random item from pool
minimum_expiration = timezone.now() + datetime.timedelta(
minutes=int(captcha_settings.CAPTCHA_GET_FROM_POOL_TIMEOUT)
)
store = (
cls.objects.filter(expiration__gt=minimum_expiration).order_by("?").first()
)
return (store and store.hashkey) or fallback()
@classmethod
def create_pool(cls, count=1000):
assert count > 0
while count > 0:
cls.generate_key()
count -= 1
Remove warning for django 3.2
WARNINGS:
captcha.CaptchaStore: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.
HINT: Configure the DEFAULT_AUTO_FIELD setting or the AppConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.
import datetime
import hashlib
import logging
import random
import time
import django
from captcha.conf import settings as captcha_settings
from django.db import models
from django.utils import timezone
if django.VERSION >= (3, 0):
from django.utils.encoding import smart_str as smart_text
else:
from django.utils.encoding import smart_text
# Heavily based on session key generation in Django
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, "SystemRandom"):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
MAX_RANDOM_KEY = 18446744073709551616 # 2 << 63
logger = logging.getLogger(__name__)
class CaptchaStore(models.Model):
id = models.AutoField(primary_key=True)
challenge = models.CharField(blank=False, max_length=32)
response = models.CharField(blank=False, max_length=32)
hashkey = models.CharField(blank=False, max_length=40, unique=True)
expiration = models.DateTimeField(blank=False)
def save(self, *args, **kwargs):
self.response = self.response.lower()
if not self.expiration:
self.expiration = timezone.now() + datetime.timedelta(
minutes=int(captcha_settings.CAPTCHA_TIMEOUT)
)
if not self.hashkey:
key_ = (
smart_text(randrange(0, MAX_RANDOM_KEY))
+ smart_text(time.time())
+ smart_text(self.challenge, errors="ignore")
+ smart_text(self.response, errors="ignore")
).encode("utf8")
self.hashkey = hashlib.sha1(key_).hexdigest()
del key_
super(CaptchaStore, self).save(*args, **kwargs)
def __str__(self):
return self.challenge
def remove_expired(cls):
cls.objects.filter(expiration__lte=timezone.now()).delete()
remove_expired = classmethod(remove_expired)
@classmethod
def generate_key(cls, generator=None):
challenge, response = captcha_settings.get_challenge(generator)()
store = cls.objects.create(challenge=challenge, response=response)
return store.hashkey
@classmethod
def pick(cls):
if not captcha_settings.CAPTCHA_GET_FROM_POOL:
return cls.generate_key()
def fallback():
logger.error("Couldn't get a captcha from pool, generating")
return cls.generate_key()
# Pick up a random item from pool
minimum_expiration = timezone.now() + datetime.timedelta(
minutes=int(captcha_settings.CAPTCHA_GET_FROM_POOL_TIMEOUT)
)
store = (
cls.objects.filter(expiration__gt=minimum_expiration).order_by("?").first()
)
return (store and store.hashkey) or fallback()
@classmethod
def create_pool(cls, count=1000):
assert count > 0
while count > 0:
cls.generate_key()
count -= 1
|
from __future__ import print_function, division, absolute_import
import itertools
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage.morphology
import cv2
import imgaug as ia
from imgaug import random as iarandom
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (
array_equal_lists, keypoints_equal, reseed, assert_cbaois_equal)
from imgaug.augmentables.heatmaps import HeatmapsOnImage
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
def _assert_same_min_max(observed, actual):
assert np.isclose(observed.min_value, actual.min_value, rtol=0, atol=1e-6)
assert np.isclose(observed.max_value, actual.max_value, rtol=0, atol=1e-6)
def _assert_same_shape(observed, actual):
assert observed.shape == actual.shape
# TODO add more tests for Affine .mode
# TODO add more tests for Affine shear
class TestAffine(unittest.TestCase):
def test_get_parameters(self):
aug = iaa.Affine(scale=1, translate_px=2, rotate=3, shear=4,
order=1, cval=0, mode="constant", backend="cv2",
fit_output=True)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic) # scale
assert isinstance(params[1], iap.Deterministic) # translate
assert isinstance(params[2], iap.Deterministic) # rotate
assert isinstance(params[3], iap.Deterministic) # shear
assert params[0].value == 1 # scale
assert params[1].value == 2 # translate
assert params[2].value == 3 # rotate
assert params[3].value == 4 # shear
assert params[4].value == 1 # order
assert params[5].value == 0 # cval
assert params[6].value == "constant" # mode
assert params[7] == "cv2" # backend
assert params[8] is True # fit_output
class TestAffine___init__(unittest.TestCase):
def test___init___scale_is_stochastic_parameter(self):
aug = iaa.Affine(scale=iap.Uniform(0.7, 0.9))
assert isinstance(aug.scale, iap.Uniform)
assert isinstance(aug.scale.a, iap.Deterministic)
assert isinstance(aug.scale.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8
def test___init___translate_percent_is_stochastic_parameter(self):
aug = iaa.Affine(translate_percent=iap.Uniform(0.7, 0.9))
assert isinstance(aug.translate, iap.Uniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.translate.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.translate.b.value < 0.9 + 1e-8
def test___init___translate_px_is_stochastic_parameter(self):
aug = iaa.Affine(translate_px=iap.DiscreteUniform(1, 10))
assert isinstance(aug.translate, iap.DiscreteUniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert aug.translate.a.value == 1
assert aug.translate.b.value == 10
def test___init___rotate_is_stochastic_parameter(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=iap.Uniform(10, 20),
shear=0)
assert isinstance(aug.rotate, iap.Uniform)
assert isinstance(aug.rotate.a, iap.Deterministic)
assert aug.rotate.a.value == 10
assert isinstance(aug.rotate.b, iap.Deterministic)
assert aug.rotate.b.value == 20
def test___init___shear_is_stochastic_parameter(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0,
shear=iap.Uniform(10, 20))
assert isinstance(aug.shear, iap.Uniform)
assert isinstance(aug.shear.a, iap.Deterministic)
assert aug.shear.a.value == 10
assert isinstance(aug.shear.b, iap.Deterministic)
assert aug.shear.b.value == 20
def test___init___cval_is_all(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=ia.ALL)
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
def test___init___cval_is_stochastic_parameter(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=iap.DiscreteUniform(1, 5))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 1
assert aug.cval.b.value == 5
def test___init___mode_is_all(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
def test___init___mode_is_string(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode="edge")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "edge"
def test___init___mode_is_list(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=["constant", "edge"])
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "constant" in aug.mode.a
and "edge" in aug.mode.a)
def test___init___mode_is_stochastic_parameter(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=iap.Choice(["constant", "edge"]))
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "constant" in aug.mode.a
and "edge" in aug.mode.a)
def test___init___fit_output_is_true(self):
aug = iaa.Affine(fit_output=True)
assert aug.fit_output is True
# ------------
# exceptions for bad inputs
# ------------
def test___init___bad_datatype_for_scale_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(scale=False)
def test___init___bad_datatype_for_translate_px_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(translate_px=False)
def test___init___bad_datatype_for_translate_percent_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(translate_percent=False)
def test___init___bad_datatype_for_rotate_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(scale=1.0, translate_px=0, rotate=False, shear=0,
cval=0)
def test___init___bad_datatype_for_shear_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=False,
cval=0)
def test___init___bad_datatype_for_cval_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=None)
def test___init___bad_datatype_for_mode_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=False)
def test___init___bad_datatype_for_order_fails(self):
# bad order datatype in case of backend=cv2
with self.assertRaises(Exception):
_ = iaa.Affine(backend="cv2", order="test")
def test___init___nonexistent_order_for_cv2_fails(self):
# non-existent order in case of backend=cv2
with self.assertRaises(AssertionError):
_ = iaa.Affine(backend="cv2", order=-1)
# TODO add test with multiple images
class TestAffine_noop(unittest.TestCase):
def setUp(self):
reseed()
@property
def base_img(self):
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
return base_img[:, :, np.newaxis]
@property
def images(self):
return np.array([self.base_img])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
return [ia.KeypointsOnImage(kps, shape=self.base_img.shape)]
@property
def psoi(self):
polygons = [ia.Polygon([(0, 0), (2, 0), (2, 2)])]
return [ia.PolygonsOnImage(polygons, shape=self.base_img.shape)]
@property
def lsoi(self):
ls = [ia.LineString([(0, 0), (2, 0), (2, 2)])]
return [ia.LineStringsOnImage(ls, shape=self.base_img.shape)]
@property
def bbsoi(self):
bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.base_img.shape)]
def test_image_noop(self):
# no translation/scale/rotate/shear, shouldnt change nothing
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_image_noop__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_image_noop__list(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images([self.base_img])
expected = [self.base_img]
assert array_equal_lists(observed, expected)
def test_image_noop__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.base_img])
expected = [self.base_img]
assert array_equal_lists(observed, expected)
def test_keypoints_noop(self):
self._test_cba_noop("augment_keypoints", self.kpsoi, False)
def test_keypoints_noop__deterministic(self):
self._test_cba_noop("augment_keypoints", self.kpsoi, True)
def test_polygons_noop(self):
self._test_cba_noop("augment_polygons", self.psoi, False)
def test_polygons_noop__deterministic(self):
self._test_cba_noop("augment_polygons", self.psoi, True)
def test_line_strings_noop(self):
self._test_cba_noop("augment_line_strings", self.lsoi, False)
def test_line_strings_noop__deterministic(self):
self._test_cba_noop("augment_line_strings", self.lsoi, True)
def test_bounding_boxes_noop(self):
self._test_cba_noop("augment_bounding_boxes", self.bbsoi, False)
def test_bounding_boxes_noop__deterministic(self):
self._test_cba_noop("augment_bounding_boxes", self.bbsoi, True)
@classmethod
def _test_cba_noop(cls, augf_name, cbaoi, deterministic):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
expected = cbaoi
assert_cbaois_equal(observed, expected)
# TODO add test with multiple images
class TestAffine_scale(unittest.TestCase):
def setUp(self):
reseed()
# ---------------------
# scale: zoom in
# ---------------------
@property
def base_img(self):
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
return base_img[:, :, np.newaxis]
@property
def images(self):
return np.array([self.base_img])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
return [ia.KeypointsOnImage(kps, shape=self.base_img.shape)]
def kpsoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 0],
[1, 1],
[2, 2]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.KeypointsOnImage.from_xy_array(
coords_scaled,
shape=self.base_img.shape)]
@property
def psoi(self):
polys = [ia.Polygon([(0, 0), (0, 2), (2, 2)])]
return [ia.PolygonsOnImage(polys, shape=self.base_img.shape)]
def psoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 0],
[0, 2],
[2, 2]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.PolygonsOnImage(
[ia.Polygon(coords_scaled)],
shape=self.base_img.shape)]
@property
def lsoi(self):
ls = [ia.LineString([(0, 0), (0, 2), (2, 2)])]
return [ia.LineStringsOnImage(ls, shape=self.base_img.shape)]
def lsoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 0],
[0, 2],
[2, 2]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.LineStringsOnImage(
[ia.LineString(coords_scaled)],
shape=self.base_img.shape)]
@property
def bbsoi(self):
bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.base_img.shape)]
def bbsoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 1],
[2, 3]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.BoundingBoxesOnImage.from_xyxy_array(
coords_scaled.reshape((1, 4)),
shape=self.base_img.shape)]
def _scale_coordinates(self, coords, scale_y, scale_x):
height, width = self.base_img.shape[0:2]
coords_scaled = []
for x, y in coords:
# the additional +0.5 and -0.5 here makes up for the shift factor
# used in the affine matrix generation
offset = 0.0
x_centered = x - width/2 + offset
y_centered = y - height/2 + offset
x_new = x_centered * scale_x + width/2 - offset
y_new = y_centered * scale_y + height/2 - offset
coords_scaled.append((x_new, y_new))
return np.float32(coords_scaled)
@property
def scale_zoom_in_outer_pixels(self):
base_img = self.base_img
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
return outer_pixels
def test_image_scale_zoom_in(self):
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images(self.images)
outer_pixels = self.scale_zoom_in_outer_pixels
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
def test_image_scale_zoom_in__deterministic(self):
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
outer_pixels = self.scale_zoom_in_outer_pixels
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
def test_image_scale_zoom_in__list(self):
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images([self.base_img])
outer_pixels = self.scale_zoom_in_outer_pixels
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
def test_image_scale_zoom_in__list_and_deterministic(self):
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.base_img])
outer_pixels = self.scale_zoom_in_outer_pixels
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
def test_keypoints_scale_zoom_in(self):
self._test_cba_scale(
"augment_keypoints", 1.75,
self.kpsoi, self.kpsoi_scaled(1.75, 1.75), False)
def test_keypoints_scale_zoom_in__deterministic(self):
self._test_cba_scale(
"augment_keypoints", 1.75,
self.kpsoi, self.kpsoi_scaled(1.75, 1.75), True)
def test_polygons_scale_zoom_in(self):
self._test_cba_scale(
"augment_polygons", 1.75,
self.psoi, self.psoi_scaled(1.75, 1.75), False)
def test_polygons_scale_zoom_in__deterministic(self):
self._test_cba_scale(
"augment_polygons", 1.75,
self.psoi, self.psoi_scaled(1.75, 1.75), True)
def test_line_strings_scale_zoom_in(self):
self._test_cba_scale(
"augment_line_strings", 1.75,
self.lsoi, self.lsoi_scaled(1.75, 1.75), False)
def test_line_strings_scale_zoom_in__deterministic(self):
self._test_cba_scale(
"augment_line_strings", 1.75,
self.lsoi, self.lsoi_scaled(1.75, 1.75), True)
def test_bounding_boxes_scale_zoom_in(self):
self._test_cba_scale(
"augment_bounding_boxes", 1.75,
self.bbsoi, self.bbsoi_scaled(1.75, 1.75), False)
def test_bounding_boxes_scale_zoom_in__deterministic(self):
self._test_cba_scale(
"augment_bounding_boxes", 1.75,
self.bbsoi, self.bbsoi_scaled(1.75, 1.75), True)
@classmethod
def _test_cba_scale(cls, augf_name, scale, cbaoi, cbaoi_scaled,
deterministic):
aug = iaa.Affine(scale=scale, translate_px=0, rotate=0, shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_scaled)
# ---------------------
# scale: zoom in only on x axis
# ---------------------
def test_image_scale_zoom_in_only_x_axis(self):
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
translate_px=0, rotate=0, shear=0)
observed = aug.augment_images(self.images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
def test_image_scale_zoom_in_only_x_axis__deterministic(self):
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
def test_image_scale_zoom_in_only_x_axis__list(self):
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
translate_px=0, rotate=0, shear=0)
observed = aug.augment_images([self.base_img])
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
def test_image_scale_zoom_in_only_x_axis__deterministic_and_list(self):
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.base_img])
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
def test_keypoints_scale_zoom_in_only_x_axis(self):
self._test_cba_scale(
"augment_keypoints", {"y": 1.0, "x": 1.75}, self.kpsoi,
self.kpsoi_scaled(1.0, 1.75), False)
def test_keypoints_scale_zoom_in_only_x_axis__deterministic(self):
self._test_cba_scale(
"augment_keypoints", {"y": 1.0, "x": 1.75}, self.kpsoi,
self.kpsoi_scaled(1.0, 1.75), True)
def test_polygons_scale_zoom_in_only_x_axis(self):
self._test_cba_scale(
"augment_polygons", {"y": 1.0, "x": 1.75}, self.psoi,
self.psoi_scaled(1.0, 1.75), False)
def test_polygons_scale_zoom_in_only_x_axis__deterministic(self):
self._test_cba_scale(
"augment_polygons", {"y": 1.0, "x": 1.75}, self.psoi,
self.psoi_scaled(1.0, 1.75), True)
def test_line_strings_scale_zoom_in_only_x_axis(self):
self._test_cba_scale(
"augment_line_strings", {"y": 1.0, "x": 1.75}, self.lsoi,
self.lsoi_scaled(1.0, 1.75), False)
def test_line_strings_scale_zoom_in_only_x_axis__deterministic(self):
self._test_cba_scale(
"augment_line_strings", {"y": 1.0, "x": 1.75}, self.lsoi,
self.lsoi_scaled(1.0, 1.75), True)
def test_bounding_boxes_scale_zoom_in_only_x_axis(self):
self._test_cba_scale(
"augment_bounding_boxes", {"y": 1.0, "x": 1.75}, self.bbsoi,
self.bbsoi_scaled(1.0, 1.75), False)
def test_bounding_boxes_scale_zoom_in_only_x_axis__deterministic(self):
self._test_cba_scale(
"augment_bounding_boxes", {"y": 1.0, "x": 1.75}, self.bbsoi,
self.bbsoi_scaled(1.0, 1.75), True)
# ---------------------
# scale: zoom in only on y axis
# ---------------------
def test_image_scale_zoom_in_only_y_axis(self):
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
translate_px=0, rotate=0, shear=0)
observed = aug.augment_images(self.images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
def test_image_scale_zoom_in_only_y_axis__deterministic(self):
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
def test_image_scale_zoom_in_only_y_axis__list(self):
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
translate_px=0, rotate=0, shear=0)
observed = aug.augment_images([self.base_img])
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
def test_image_scale_zoom_in_only_y_axis__deterministic_and_list(self):
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.base_img])
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
def test_keypoints_scale_zoom_in_only_y_axis(self):
self._test_cba_scale(
"augment_keypoints", {"y": 1.75, "x": 1.0}, self.kpsoi,
self.kpsoi_scaled(1.75, 1.0), False)
def test_keypoints_scale_zoom_in_only_y_axis__deterministic(self):
self._test_cba_scale(
"augment_keypoints", {"y": 1.75, "x": 1.0}, self.kpsoi,
self.kpsoi_scaled(1.75, 1.0), True)
def test_polygons_scale_zoom_in_only_y_axis(self):
self._test_cba_scale(
"augment_polygons", {"y": 1.75, "x": 1.0}, self.psoi,
self.psoi_scaled(1.75, 1.0), False)
def test_polygons_scale_zoom_in_only_y_axis__deterministic(self):
self._test_cba_scale(
"augment_polygons", {"y": 1.75, "x": 1.0}, self.psoi,
self.psoi_scaled(1.75, 1.0), True)
def test_line_strings_scale_zoom_in_only_y_axis(self):
self._test_cba_scale(
"augment_polygons", {"y": 1.75, "x": 1.0}, self.psoi,
self.psoi_scaled(1.75, 1.0), False)
def test_line_strings_scale_zoom_in_only_y_axis__deterministic(self):
self._test_cba_scale(
"augment_line_strings", {"y": 1.75, "x": 1.0}, self.lsoi,
self.lsoi_scaled(1.75, 1.0), True)
def test_bounding_boxes_scale_zoom_in_only_y_axis(self):
self._test_cba_scale(
"augment_bounding_boxes", {"y": 1.75, "x": 1.0}, self.bbsoi,
self.bbsoi_scaled(1.75, 1.0), False)
def test_bounding_boxes_scale_zoom_in_only_y_axis__deterministic(self):
self._test_cba_scale(
"augment_bounding_boxes", {"y": 1.75, "x": 1.0}, self.bbsoi,
self.bbsoi_scaled(1.75, 1.0), True)
# ---------------------
# scale: zoom out
# ---------------------
# these tests use a 4x4 area of all 255, which is zoomed out to a 4x4 area
# in which the center 2x2 area is 255
# zoom in should probably be adapted to this style
# no separate tests here for x/y axis, should work fine if zoom in works
# with that
@property
def scale_zoom_out_base_img(self):
return np.ones((4, 4, 1), dtype=np.uint8) * 255
@property
def scale_zoom_out_images(self):
return np.array([self.scale_zoom_out_base_img])
@property
def scale_zoom_out_outer_pixels(self):
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
return outer_pixels
@property
def scale_zoom_out_inner_pixels(self):
return [1, 1, 2, 2], [1, 2, 1, 2]
@property
def scale_zoom_out_kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),
ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)]
return [ia.KeypointsOnImage(kps,
shape=self.scale_zoom_out_base_img.shape)]
@property
def scale_zoom_out_kpsoi_aug(self):
kps_aug = [ia.Keypoint(x=0.765, y=0.765),
ia.Keypoint(x=2.235, y=0.765),
ia.Keypoint(x=0.765, y=2.235),
ia.Keypoint(x=2.235, y=2.235)]
return [ia.KeypointsOnImage(kps_aug,
shape=self.scale_zoom_out_base_img.shape)]
def test_image_scale_zoom_out(self):
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images(self.scale_zoom_out_images)
outer_pixels = self.scale_zoom_out_outer_pixels
inner_pixels = self.scale_zoom_out_inner_pixels
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
def test_image_scale_zoom_out__deterministic(self):
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.scale_zoom_out_images)
outer_pixels = self.scale_zoom_out_outer_pixels
inner_pixels = self.scale_zoom_out_inner_pixels
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
def test_image_scale_zoom_out__list(self):
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images([self.scale_zoom_out_base_img])
outer_pixels = self.scale_zoom_out_outer_pixels
inner_pixels = self.scale_zoom_out_inner_pixels
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
def test_image_scale_zoom_out__list_and_deterministic(self):
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.scale_zoom_out_base_img])
outer_pixels = self.scale_zoom_out_outer_pixels
inner_pixels = self.scale_zoom_out_inner_pixels
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
def test_keypoints_scale_zoom_out(self):
self._test_cba_scale(
"augment_keypoints", 0.49, self.kpsoi,
self.kpsoi_scaled(0.49, 0.49), False)
def test_keypoints_scale_zoom_out__deterministic(self):
self._test_cba_scale(
"augment_keypoints", 0.49, self.kpsoi,
self.kpsoi_scaled(0.49, 0.49), True)
def test_polygons_scale_zoom_out(self):
self._test_cba_scale(
"augment_polygons", 0.49, self.psoi,
self.psoi_scaled(0.49, 0.49), False)
def test_polygons_scale_zoom_out__deterministic(self):
self._test_cba_scale(
"augment_polygons", 0.49, self.psoi,
self.psoi_scaled(0.49, 0.49), True)
def test_line_strings_scale_zoom_out(self):
self._test_cba_scale(
"augment_line_strings", 0.49, self.lsoi,
self.lsoi_scaled(0.49, 0.49), False)
def test_line_strings_scale_zoom_out__deterministic(self):
self._test_cba_scale(
"augment_line_strings", 0.49, self.lsoi,
self.lsoi_scaled(0.49, 0.49), True)
def test_bounding_boxes_scale_zoom_out(self):
self._test_cba_scale(
"augment_bounding_boxes", 0.49, self.bbsoi,
self.bbsoi_scaled(0.49, 0.49), False)
def test_bounding_boxes_scale_zoom_out__deterministic(self):
self._test_cba_scale(
"augment_bounding_boxes", 0.49, self.bbsoi,
self.bbsoi_scaled(0.49, 0.49), True)
# ---------------------
# scale: x and y axis are both tuples
# ---------------------
def test_image_x_and_y_axis_are_tuples(self):
aug = iaa.Affine(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
translate_px=0, rotate=0, shear=0)
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images = np.array([image])
last_aug = None
nb_changed_aug = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
if i == 0:
last_aug = observed_aug
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
last_aug = observed_aug
assert nb_changed_aug >= int(nb_iterations * 0.8)
def test_image_x_and_y_axis_are_tuples__deterministic(self):
aug = iaa.Affine(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images = np.array([image])
last_aug_det = None
nb_changed_aug_det = 0
nb_iterations = 10
for i in sm.xrange(nb_iterations):
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug_det = observed_aug_det
assert nb_changed_aug_det == 0
# ------------
# alignment
# TODO add alignment tests for: BBs, Polys, LS
# ------------
def test_keypoint_alignment(self):
image = np.zeros((100, 100), dtype=np.uint8)
image[40-1:40+2, 40-1:40+2] = 255
image[40-1:40+2, 60-1:60+2] = 255
kps = [ia.Keypoint(x=40, y=40), ia.Keypoint(x=60, y=40)]
kpsoi = ia.KeypointsOnImage(kps, shape=image.shape)
images = [image, image, image]
kpsois = [kpsoi.deepcopy(),
ia.KeypointsOnImage([], shape=image.shape),
kpsoi.deepcopy()]
aug = iaa.Affine(scale=[0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5,
1.6, 1.7],
order=0)
for iter in sm.xrange(40):
images_aug, kpsois_aug = aug(images=images, keypoints=kpsois)
assert kpsois_aug[1].empty
for i in [0, 2]:
image_aug = images_aug[i]
kpsoi_aug = kpsois_aug[i]
for kp in kpsoi_aug.keypoints:
value = image_aug[int(kp.y), int(kp.x)]
assert value > 200
# ------------
# make sure that polygons stay valid upon extreme scaling
# ------------
def test_polygons_stay_valid_when_using_extreme_scalings(self):
scales = [1e-4, 1e-2, 1e2, 1e4]
backends = ["auto", "cv2", "skimage"]
orders = [0, 1, 3]
gen = itertools.product(scales, backends, orders)
for scale, backend, order in gen:
with self.subTest(scale=scale, backend=backend, order=order):
aug = iaa.Affine(scale=scale, order=order)
psoi = ia.PolygonsOnImage([
ia.Polygon([(0, 0), (10, 0), (5, 5)])],
shape=(10, 10))
psoi_aug = aug.augment_polygons(psoi)
poly = psoi_aug.polygons[0]
ext = poly.exterior
assert poly.is_valid
assert ext[0][0] < ext[2][0] < ext[1][0]
assert ext[0][1] < ext[2][1]
assert np.allclose(ext[0][1], ext[1][1])
class TestAffine_translate(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
return np.uint8([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
])[:, :, np.newaxis]
@property
def image_1px_right(self):
return np.uint8([
[0, 0, 0],
[0, 0, 1],
[0, 0, 0]
])[:, :, np.newaxis]
@property
def image_1px_bottom(self):
return np.uint8([
[0, 0, 0],
[0, 0, 0],
[0, 1, 0]
])[:, :, np.newaxis]
@property
def images(self):
return np.array([self.image])
@property
def images_1px_right(self):
return np.array([self.image_1px_right])
@property
def images_1px_bottom(self):
return np.array([self.image_1px_bottom])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=1, y=1)]
return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
@property
def kpsoi_1px_right(self):
kps = [ia.Keypoint(x=2, y=1)]
return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
@property
def kpsoi_1px_bottom(self):
kps = [ia.Keypoint(x=1, y=2)]
return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
@property
def psoi(self):
polys = [ia.Polygon([(0, 0), (2, 0), (2, 2)])]
return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
@property
def psoi_1px_right(self):
polys = [ia.Polygon([(0+1, 0), (2+1, 0), (2+1, 2)])]
return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
@property
def psoi_1px_bottom(self):
polys = [ia.Polygon([(0, 0+1), (2, 0+1), (2, 2+1)])]
return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
@property
def lsoi(self):
ls = [ia.LineString([(0, 0), (2, 0), (2, 2)])]
return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
@property
def lsoi_1px_right(self):
ls = [ia.LineString([(0+1, 0), (2+1, 0), (2+1, 2)])]
return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
@property
def lsoi_1px_bottom(self):
ls = [ia.LineString([(0, 0+1), (2, 0+1), (2, 2+1)])]
return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
@property
def bbsoi(self):
bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
@property
def bbsoi_1px_right(self):
bbs = [ia.BoundingBox(x1=0+1, y1=1, x2=2+1, y2=3)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
@property
def bbsoi_1px_bottom(self):
bbs = [ia.BoundingBox(x1=0, y1=1+1, x2=2, y2=3+1)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
# ---------------------
# translate: move one pixel to the right
# ---------------------
def test_image_translate_1px_right(self):
# move one pixel to the right
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right__list(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0)
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_right])
def test_image_translate_1px_right__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_right])
def test_keypoints_translate_1px_right(self):
self._test_cba_translate_px(
"augment_keypoints", {"x": 1, "y": 0},
self.kpsoi, self.kpsoi_1px_right, False)
def test_keypoints_translate_1px_right__deterministic(self):
self._test_cba_translate_px(
"augment_keypoints", {"x": 1, "y": 0},
self.kpsoi, self.kpsoi_1px_right, True)
def test_polygons_translate_1px_right(self):
self._test_cba_translate_px(
"augment_polygons", {"x": 1, "y": 0},
self.psoi, self.psoi_1px_right, False)
def test_polygons_translate_1px_right__deterministic(self):
self._test_cba_translate_px(
"augment_polygons", {"x": 1, "y": 0},
self.psoi, self.psoi_1px_right, True)
def test_line_strings_translate_1px_right(self):
self._test_cba_translate_px(
"augment_line_strings", {"x": 1, "y": 0},
self.lsoi, self.lsoi_1px_right, False)
def test_line_strings_translate_1px_right__deterministic(self):
self._test_cba_translate_px(
"augment_line_strings", {"x": 1, "y": 0},
self.lsoi, self.lsoi_1px_right, True)
def test_bounding_boxes_translate_1px_right(self):
self._test_cba_translate_px(
"augment_bounding_boxes", {"x": 1, "y": 0},
self.bbsoi, self.bbsoi_1px_right, False)
def test_bounding_boxes_translate_1px_right__deterministic(self):
self._test_cba_translate_px(
"augment_bounding_boxes", {"x": 1, "y": 0},
self.bbsoi, self.bbsoi_1px_right, True)
@classmethod
def _test_cba_translate_px(cls, augf_name, px, cbaoi, cbaoi_translated,
deterministic):
aug = iaa.Affine(scale=1.0, translate_px=px, rotate=0, shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_translated)
def test_image_translate_1px_right_skimage(self):
# move one pixel to the right
# with backend = skimage
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0, backend="skimage")
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right_skimage_order_all(self):
# move one pixel to the right
# with backend = skimage, order=ALL
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0, backend="skimage", order=ia.ALL)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right_skimage_order_is_list(self):
# move one pixel to the right
# with backend = skimage, order=list
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0, backend="skimage", order=[0, 1, 3])
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right_cv2_order_is_list(self):
# move one pixel to the right
# with backend = cv2, order=list
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0, backend="cv2", order=[0, 1, 3])
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right_cv2_order_is_stoch_param(self):
# move one pixel to the right
# with backend = cv2, order=StochasticParameter
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0, backend="cv2", order=iap.Choice([0, 1, 3]))
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
# ---------------------
# translate: move one pixel to the bottom
# ---------------------
def test_image_translate_1px_bottom(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
shear=0)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_bottom)
def test_image_translate_1px_bottom__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_bottom)
def test_image_translate_1px_bottom__list(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
shear=0)
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_bottom])
def test_image_translate_1px_bottom__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_bottom])
def test_keypoints_translate_1px_bottom(self):
self._test_cba_translate_px(
"augment_keypoints", {"x": 0, "y": 1},
self.kpsoi, self.kpsoi_1px_bottom, False)
def test_keypoints_translate_1px_bottom__deterministic(self):
self._test_cba_translate_px(
"augment_keypoints", {"x": 0, "y": 1},
self.kpsoi, self.kpsoi_1px_bottom, True)
def test_polygons_translate_1px_bottom(self):
self._test_cba_translate_px(
"augment_polygons", {"x": 0, "y": 1},
self.psoi, self.psoi_1px_bottom, False)
def test_polygons_translate_1px_bottom__deterministic(self):
self._test_cba_translate_px(
"augment_polygons", {"x": 0, "y": 1},
self.psoi, self.psoi_1px_bottom, True)
def test_line_strings_translate_1px_bottom(self):
self._test_cba_translate_px(
"augment_line_strings", {"x": 0, "y": 1},
self.lsoi, self.lsoi_1px_bottom, False)
def test_line_strings_translate_1px_bottom__deterministic(self):
self._test_cba_translate_px(
"augment_line_strings", {"x": 0, "y": 1},
self.lsoi, self.lsoi_1px_bottom, True)
def test_bounding_boxes_translate_1px_bottom(self):
self._test_cba_translate_px(
"augment_bounding_boxes", {"x": 0, "y": 1},
self.bbsoi, self.bbsoi_1px_bottom, False)
def test_bounding_boxes_translate_1px_bottom__deterministic(self):
self._test_cba_translate_px(
"augment_bounding_boxes", {"x": 0, "y": 1},
self.bbsoi, self.bbsoi_1px_bottom, True)
# ---------------------
# translate: fraction of the image size (towards the right)
# ---------------------
def test_image_translate_33percent_right(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
rotate=0, shear=0)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_33percent_right__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_33percent_right__list(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
rotate=0, shear=0)
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_right])
def test_image_translate_33percent_right__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_right])
def test_keypoints_translate_33percent_right(self):
self._test_cba_translate_percent(
"augment_keypoints", {"x": 0.3333, "y": 0},
self.kpsoi, self.kpsoi_1px_right, False)
def test_keypoints_translate_33percent_right__deterministic(self):
self._test_cba_translate_percent(
"augment_keypoints", {"x": 0.3333, "y": 0},
self.kpsoi, self.kpsoi_1px_right, True)
def test_polygons_translate_33percent_right(self):
self._test_cba_translate_percent(
"augment_polygons", {"x": 0.3333, "y": 0},
self.psoi, self.psoi_1px_right, False)
def test_polygons_translate_33percent_right__deterministic(self):
self._test_cba_translate_percent(
"augment_polygons", {"x": 0.3333, "y": 0},
self.psoi, self.psoi_1px_right, True)
def test_line_strings_translate_33percent_right(self):
self._test_cba_translate_percent(
"augment_line_strings", {"x": 0.3333, "y": 0},
self.lsoi, self.lsoi_1px_right, False)
def test_line_strings_translate_33percent_right__deterministic(self):
self._test_cba_translate_percent(
"augment_line_strings", {"x": 0.3333, "y": 0},
self.lsoi, self.lsoi_1px_right, True)
def test_bounding_boxes_translate_33percent_right(self):
self._test_cba_translate_percent(
"augment_bounding_boxes", {"x": 0.3333, "y": 0},
self.bbsoi, self.bbsoi_1px_right, False)
def test_bounding_boxes_translate_33percent_right__deterministic(self):
self._test_cba_translate_percent(
"augment_bounding_boxes", {"x": 0.3333, "y": 0},
self.bbsoi, self.bbsoi_1px_right, True)
@classmethod
def _test_cba_translate_percent(cls, augf_name, percent, cbaoi,
cbaoi_translated, deterministic):
aug = iaa.Affine(scale=1.0, translate_percent=percent, rotate=0,
shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_translated)
# ---------------------
# translate: fraction of the image size (towards the bottom)
# ---------------------
def test_image_translate_33percent_bottom(self):
# move 33% (one pixel) to the bottom
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
rotate=0, shear=0)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_bottom)
def test_image_translate_33percent_bottom__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_bottom)
def test_image_translate_33percent_bottom__list(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
rotate=0, shear=0)
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_bottom])
def test_image_translate_33percent_bottom__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_bottom])
def test_keypoints_translate_33percent_bottom(self):
self._test_cba_translate_percent(
"augment_keypoints", {"x": 0, "y": 0.3333},
self.kpsoi, self.kpsoi_1px_bottom, False)
def test_keypoints_translate_33percent_bottom__deterministic(self):
self._test_cba_translate_percent(
"augment_keypoints", {"x": 0, "y": 0.3333},
self.kpsoi, self.kpsoi_1px_bottom, True)
def test_polygons_translate_33percent_bottom(self):
self._test_cba_translate_percent(
"augment_polygons", {"x": 0, "y": 0.3333},
self.psoi, self.psoi_1px_bottom, False)
def test_polygons_translate_33percent_bottom__deterministic(self):
self._test_cba_translate_percent(
"augment_polygons", {"x": 0, "y": 0.3333},
self.psoi, self.psoi_1px_bottom, True)
def test_line_strings_translate_33percent_bottom(self):
self._test_cba_translate_percent(
"augment_line_strings", {"x": 0, "y": 0.3333},
self.lsoi, self.lsoi_1px_bottom, False)
def test_line_strings_translate_33percent_bottom__deterministic(self):
self._test_cba_translate_percent(
"augment_line_strings", {"x": 0, "y": 0.3333},
self.lsoi, self.lsoi_1px_bottom, True)
def test_bounding_boxes_translate_33percent_bottom(self):
self._test_cba_translate_percent(
"augment_bounding_boxes", {"x": 0, "y": 0.3333},
self.bbsoi, self.bbsoi_1px_bottom, False)
def test_bounding_boxes_translate_33percent_bottom__deterministic(self):
self._test_cba_translate_percent(
"augment_bounding_boxes", {"x": 0, "y": 0.3333},
self.bbsoi, self.bbsoi_1px_bottom, True)
# ---------------------
# translate: axiswise uniform distributions
# ---------------------
def test_image_translate_by_axiswise_uniform_distributions(self):
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)},
rotate=0, shear=0)
last_aug = None
nb_changed_aug = 0
nb_iterations = 1000
centers_aug = self.image.astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(self.images)
if i == 0:
last_aug = observed_aug
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
last_aug = observed_aug
assert len(observed_aug[0].nonzero()[0]) == 1
centers_aug += (observed_aug[0] > 0)
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
def test_image_translate_by_axiswise_uniform_distributions__det(self):
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
last_aug_det = None
nb_changed_aug_det = 0
nb_iterations = 10
centers_aug_det = self.image.astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug_det = aug_det.augment_images(self.images)
if i == 0:
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug_det = observed_aug_det
assert len(observed_aug_det[0].nonzero()[0]) == 1
centers_aug_det += (observed_aug_det[0] > 0)
assert nb_changed_aug_det == 0
# ---------------------
# translate heatmaps
# ---------------------
@property
def heatmaps(self):
return ia.HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
@property
def heatmaps_1px_right(self):
return ia.HeatmapsOnImage(
np.float32([
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
def test_heatmaps_translate_1px_right(self):
aug = iaa.Affine(translate_px={"x": 1})
observed = aug.augment_heatmaps([self.heatmaps])[0]
_assert_same_shape(observed, self.heatmaps)
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed.get_arr(),
self.heatmaps_1px_right.get_arr())
def test_heatmaps_translate_1px_right_should_ignore_cval(self):
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.Affine(translate_px={"x": 1}, cval=255)
observed = aug.augment_heatmaps([self.heatmaps])[0]
_assert_same_shape(observed, self.heatmaps)
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed.get_arr(),
self.heatmaps_1px_right.get_arr())
def test_heatmaps_translate_1px_right_should_ignore_mode(self):
aug = iaa.Affine(translate_px={"x": 1}, mode="edge", cval=255)
observed = aug.augment_heatmaps([self.heatmaps])[0]
_assert_same_shape(observed, self.heatmaps)
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed.get_arr(),
self.heatmaps_1px_right.get_arr())
# ---------------------
# translate segmaps
# ---------------------
@property
def segmaps(self):
return SegmentationMapsOnImage(
np.int32([
[0, 1, 2],
[0, 1, 2],
[2, 2, 2],
]),
shape=(3, 3, 3)
)
@property
def segmaps_1px_right(self):
return SegmentationMapsOnImage(
np.int32([
[0, 0, 1],
[0, 0, 1],
[0, 2, 2],
]),
shape=(3, 3, 3)
)
def test_segmaps_translate_1px_right(self):
aug = iaa.Affine(translate_px={"x": 1})
observed = aug.augment_segmentation_maps([self.segmaps])[0]
_assert_same_shape(observed, self.segmaps)
assert np.array_equal(observed.get_arr(),
self.segmaps_1px_right.get_arr())
def test_segmaps_translate_1px_right_should_ignore_cval(self):
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.Affine(translate_px={"x": 1}, cval=255)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
_assert_same_shape(observed, self.segmaps)
assert np.array_equal(observed.get_arr(),
self.segmaps_1px_right.get_arr())
def test_segmaps_translate_1px_right_should_ignore_mode(self):
aug = iaa.Affine(translate_px={"x": 1}, mode="edge", cval=255)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
_assert_same_shape(observed, self.segmaps)
assert np.array_equal(observed.get_arr(),
self.segmaps_1px_right.get_arr())
class TestAffine_rotate(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
return np.uint8([
[0, 0, 0],
[255, 255, 255],
[0, 0, 0]
])[:, :, np.newaxis]
@property
def image_rot90(self):
return np.uint8([
[0, 255, 0],
[0, 255, 0],
[0, 255, 0]
])[:, :, np.newaxis]
@property
def images(self):
return np.array([self.image])
@property
def images_rot90(self):
return np.array([self.image_rot90])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)]
return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
@property
def kpsoi_rot90(self):
kps = [ia.Keypoint(x=3-1, y=0), ia.Keypoint(x=3-1, y=1),
ia.Keypoint(x=3-1, y=2)]
return [ia.KeypointsOnImage(kps, shape=self.image_rot90.shape)]
@property
def psoi(self):
polys = [ia.Polygon([(0, 0), (3, 0), (3, 3)])]
return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
@property
def psoi_rot90(self):
polys = [ia.Polygon([(3-0, 0), (3-0, 3), (3-3, 3)])]
return [ia.PolygonsOnImage(polys, shape=self.image_rot90.shape)]
@property
def lsoi(self):
ls = [ia.LineString([(0, 0), (3, 0), (3, 3)])]
return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
@property
def lsoi_rot90(self):
ls = [ia.LineString([(3-0, 0), (3-0, 3), (3-3, 3)])]
return [ia.LineStringsOnImage(ls, shape=self.image_rot90.shape)]
@property
def bbsoi(self):
bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
@property
def bbsoi_rot90(self):
bbs = [ia.BoundingBox(x1=0, y1=0, x2=2, y2=2)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.image_rot90.shape)]
def test_image_rot90(self):
# rotate by 90 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
observed = aug.augment_images(self.images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, self.images_rot90)
def test_image_rot90__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, self.images_rot90)
def test_image_rot90__list(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
observed = aug.augment_images([self.image])
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, [self.image_rot90])
def test_image_rot90__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, [self.image_rot90])
def test_keypoints_rot90(self):
self._test_cba_rotate(
"augment_keypoints", 90, self.kpsoi, self.kpsoi_rot90, False)
def test_keypoints_rot90__deterministic(self):
self._test_cba_rotate(
"augment_keypoints", 90, self.kpsoi, self.kpsoi_rot90, True)
def test_polygons_rot90(self):
self._test_cba_rotate(
"augment_polygons", 90, self.psoi, self.psoi_rot90, False)
def test_polygons_rot90__deterministic(self):
self._test_cba_rotate(
"augment_polygons", 90, self.psoi, self.psoi_rot90, True)
def test_line_strings_rot90(self):
self._test_cba_rotate(
"augment_line_strings", 90, self.lsoi, self.lsoi_rot90, False)
def test_line_strings_rot90__deterministic(self):
self._test_cba_rotate(
"augment_line_strings", 90, self.lsoi, self.lsoi_rot90, True)
def test_bounding_boxes_rot90(self):
self._test_cba_rotate(
"augment_bounding_boxes", 90, self.bbsoi, self.bbsoi_rot90, False)
def test_bounding_boxes_rot90__deterministic(self):
self._test_cba_rotate(
"augment_bounding_boxes", 90, self.bbsoi, self.bbsoi_rot90, True)
@classmethod
def _test_cba_rotate(cls, augf_name, rotate, cbaoi,
cbaoi_rotated, deterministic):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=rotate,
shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_rotated)
def test_image_rotate_is_tuple_0_to_364_deg(self):
# random rotation 0-364 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
last_aug = None
nb_changed_aug = 0
nb_iterations = 1000
pixels_sums_aug = self.image.astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(self.images)
if i == 0:
last_aug = observed_aug
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
last_aug = observed_aug
pixels_sums_aug += (observed_aug[0] > 100)
assert nb_changed_aug >= int(nb_iterations * 0.9)
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
# outer pixels, should sometimes be white
# the values here had to be set quite tolerant, the middle pixels at
# top/left/bottom/right get more activation than expected
outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2],
[0, 1, 2, 0, 2, 0, 1, 2])
assert (
pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))
).all()
assert (
pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))
).all()
def test_image_rotate_is_tuple_0_to_364_deg__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
aug_det = aug.to_deterministic()
last_aug_det = None
nb_changed_aug_det = 0
nb_iterations = 10
pixels_sums_aug_det = self.image.astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug_det = aug_det.augment_images(self.images)
if i == 0:
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug_det = observed_aug_det
pixels_sums_aug_det += (observed_aug_det[0] > 100)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug_det[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug_det[1, 1] < (nb_iterations * 1.02)
def test_alignment_between_images_and_heatmaps_for_fixed_rot(self):
# measure alignment between images and heatmaps when rotating
for backend in ["auto", "cv2", "skimage"]:
aug = iaa.Affine(rotate=45, backend=backend)
image = np.zeros((7, 6), dtype=np.uint8)
image[:, 2:3+1] = 255
hm = ia.HeatmapsOnImage(image.astype(np.float32)/255, shape=(7, 6))
img_aug = aug.augment_image(image)
hm_aug = aug.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (7, 6)
assert hm_aug.arr_0to1.shape == (7, 6, 1)
assert (same / img_aug_mask.size) >= 0.95
def test_alignment_between_images_and_smaller_heatmaps_for_fixed_rot(self):
# measure alignment between images and heatmaps when rotating
# here with smaller heatmaps
for backend in ["auto", "cv2", "skimage"]:
aug = iaa.Affine(rotate=45, backend=backend)
image = np.zeros((56, 48), dtype=np.uint8)
image[:, 16:24+1] = 255
hm = ia.HeatmapsOnImage(
ia.imresize_single_image(
image, (28, 24), interpolation="cubic"
).astype(np.float32)/255,
shape=(56, 48)
)
img_aug = aug.augment_image(image)
hm_aug = aug.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, img_aug.shape[0:2], interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (56, 48)
assert hm_aug.arr_0to1.shape == (28, 24, 1)
assert (same / img_aug_mask.size) >= 0.9
class TestAffine_cval(unittest.TestCase):
@property
def image(self):
return np.ones((3, 3, 1), dtype=np.uint8) * 255
@property
def images(self):
return np.array([self.image])
def test_image_fixed_cval(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=128)
observed = aug.augment_images(self.images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
def test_image_fixed_cval__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=128)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
def test_image_fixed_cval__list(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=128)
observed = aug.augment_images([self.image])
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
def test_image_fixed_cval__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=128)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
def test_image_cval_is_tuple(self):
# random cvals
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=(0, 255))
last_aug = None
nb_changed_aug = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(self.images)
if i == 0:
last_aug = observed_aug
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
last_aug = observed_aug
assert nb_changed_aug >= int(nb_iterations * 0.9)
def test_image_cval_is_tuple__deterministic(self):
# random cvals
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=(0, 255))
aug_det = aug.to_deterministic()
last_aug_det = None
nb_changed_aug_det = 0
nb_iterations = 10
for i in sm.xrange(nb_iterations):
observed_aug_det = aug_det.augment_images(self.images)
if i == 0:
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug_det = observed_aug_det
assert nb_changed_aug_det == 0
class TestAffine_fit_output(unittest.TestCase):
@property
def image(self):
return np.ones((3, 3, 1), dtype=np.uint8) * 255
@property
def images(self):
return np.array([self.image])
@property
def heatmaps(self):
return ia.HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)]
return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
def test_image_translate(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(translate_px=100, fit_output=True,
backend=backend)
observed = aug.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_keypoints_translate(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(translate_px=100, fit_output=True,
backend=backend)
observed = aug.augment_keypoints(self.kpsoi)
expected = self.kpsoi
assert keypoints_equal(observed, expected)
def test_heatmaps_translate(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(translate_px=100, fit_output=True,
backend=backend)
observed = aug.augment_heatmaps([self.heatmaps])[0]
expected = self.heatmaps
assert np.allclose(observed.arr_0to1, expected.arr_0to1)
def test_image_rot45(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=45, fit_output=True,
backend=backend)
img = np.zeros((10, 10), dtype=np.uint8)
img[0:2, 0:2] = 255
img[-2:, 0:2] = 255
img[0:2, -2:] = 255
img[-2:, -2:] = 255
img_aug = aug.augment_image(img)
_labels, nb_labels = skimage.morphology.label(
img_aug > 240, return_num=True, connectivity=2)
assert nb_labels == 4
def test_heatmaps_rot45(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=45, fit_output=True,
backend=backend)
img = np.zeros((10, 10), dtype=np.uint8)
img[0:2, 0:2] = 255
img[-2:, 0:2] = 255
img[0:2, -2:] = 255
img[-2:, -2:] = 255
hm = ia.HeatmapsOnImage(img.astype(np.float32)/255,
shape=(10, 10))
hm_aug = aug.augment_heatmaps([hm])[0]
_labels, nb_labels = skimage.morphology.label(
hm_aug.arr_0to1 > 240/255, return_num=True, connectivity=2)
assert nb_labels == 4
def test_heatmaps_rot45__heatmaps_smaller_than_image(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=45, fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
hm = HeatmapsOnImage(
ia.imresize_single_image(
img, (40, 40), interpolation="cubic"
).astype(np.float32)/255,
shape=(80, 80)
)
hm_aug = aug.augment_heatmaps([hm])[0]
# these asserts are deactivated because the image size can
# change under fit_output=True
# assert hm_aug.shape == (80, 80)
# assert hm_aug.arr_0to1.shape == (40, 40, 1)
_labels, nb_labels = skimage.morphology.label(
hm_aug.arr_0to1 > 200/255, return_num=True, connectivity=2)
assert nb_labels == 4
def test_image_heatmap_alignment_random_rots(self):
nb_iterations = 50
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
for _ in sm.xrange(nb_iterations):
aug = iaa.Affine(rotate=(0, 364), fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
hm = HeatmapsOnImage(
img.astype(np.float32)/255,
shape=(80, 80)
)
img_aug = aug.augment_image(img)
hm_aug = aug.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, img_aug.shape[0:2],
interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.95
def test_image_heatmap_alignment_random_rots__hms_smaller_than_img(self):
nb_iterations = 50
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
for _ in sm.xrange(nb_iterations):
aug = iaa.Affine(rotate=(0, 364), fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
hm = HeatmapsOnImage(
ia.imresize_single_image(
img, (40, 40), interpolation="cubic"
).astype(np.float32)/255,
shape=(80, 80)
)
img_aug = aug.augment_image(img)
hm_aug = aug.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, img_aug.shape[0:2],
interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.95
def test_segmaps_rot45(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=45, fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
segmap = SegmentationMapsOnImage(
(img > 100).astype(np.int32),
shape=(80, 80)
)
segmap_aug = aug.augment_segmentation_maps([segmap])[0]
# these asserts are deactivated because the image size can
# change under fit_output=True
# assert segmap_aug.shape == (80, 80)
# assert segmap_aug.arr_0to1.shape == (40, 40, 1)
_labels, nb_labels = skimage.morphology.label(
segmap_aug.arr > 0, return_num=True, connectivity=2)
assert nb_labels == 4
def test_segmaps_rot45__segmaps_smaller_than_img(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=45, fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
segmap = SegmentationMapsOnImage(
(
ia.imresize_single_image(
img, (40, 40), interpolation="cubic"
) > 100
).astype(np.int32),
shape=(80, 80)
)
segmap_aug = aug.augment_segmentation_maps([segmap])[0]
# these asserts are deactivated because the image size can
# change under fit_output=True
# assert segmap_aug.shape == (80, 80)
# assert segmap_aug.arr_0to1.shape == (40, 40, 1)
_labels, nb_labels = skimage.morphology.label(
segmap_aug.arr > 0, return_num=True, connectivity=2)
assert nb_labels == 4
def test_image_segmap_alignment_random_rots(self):
nb_iterations = 50
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
for _ in sm.xrange(nb_iterations):
aug = iaa.Affine(rotate=(0, 364), fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
segmap = SegmentationMapsOnImage(
(img > 100).astype(np.int32),
shape=(80, 80)
)
img_aug = aug.augment_image(img)
segmap_aug = aug.augment_segmentation_maps([segmap])[0]
img_aug_mask = img_aug > 100
segmap_aug_mask = ia.imresize_single_image(
segmap_aug.arr,
img_aug.shape[0:2],
interpolation="nearest"
) > 0
same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.95
def test_image_segmap_alignment_random_rots__sms_smaller_than_img(self):
nb_iterations = 50
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
for _ in sm.xrange(nb_iterations):
aug = iaa.Affine(rotate=(0, 364), fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
segmap = SegmentationMapsOnImage(
(
ia.imresize_single_image(
img, (40, 40), interpolation="cubic"
) > 100
).astype(np.int32),
shape=(80, 80)
)
img_aug = aug.augment_image(img)
segmap_aug = aug.augment_segmentation_maps([segmap])[0]
img_aug_mask = img_aug > 100
segmap_aug_mask = ia.imresize_single_image(
segmap_aug.arr,
img_aug.shape[0:2],
interpolation="nearest"
) > 0
same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.95
def test_keypoints_rot90_without_fit_output(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=90, backend=backend)
kps = ia.KeypointsOnImage([ia.Keypoint(10, 10)],
shape=(100, 200, 3))
kps_aug = aug.augment_keypoints(kps)
assert kps_aug.shape == (100, 200, 3)
assert not np.allclose(
[kps_aug.keypoints[0].x, kps_aug.keypoints[0].y],
[kps.keypoints[0].x, kps.keypoints[0].y],
atol=1e-2, rtol=0)
def test_keypoints_rot90(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
kps = ia.KeypointsOnImage([ia.Keypoint(10, 10)],
shape=(100, 200, 3))
kps_aug = aug.augment_keypoints(kps)
assert kps_aug.shape == (200, 100, 3)
assert not np.allclose(
[kps_aug.keypoints[0].x, kps_aug.keypoints[0].y],
[kps.keypoints[0].x, kps.keypoints[0].y],
atol=1e-2, rtol=0)
def test_empty_keypoints_rot90(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
kps = ia.KeypointsOnImage([], shape=(100, 200, 3))
kps_aug = aug.augment_keypoints(kps)
assert kps_aug.shape == (200, 100, 3)
assert len(kps_aug.keypoints) == 0
def _test_cbaoi_rot90_without_fit_output(self, cbaoi, augf_name):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
# verify that shape in PolygonsOnImages changes
aug = iaa.Affine(rotate=90, backend=backend)
cbaoi_aug = getattr(aug, augf_name)([cbaoi, cbaoi])
assert len(cbaoi_aug) == 2
for cbaoi_aug_i in cbaoi_aug:
if isinstance(cbaoi, (ia.PolygonsOnImage,
ia.LineStringsOnImage)):
assert cbaoi_aug_i.shape == cbaoi.shape
assert not cbaoi_aug_i.items[0].coords_almost_equals(
cbaoi.items[0].coords, max_distance=1e-2)
else:
assert_cbaois_equal(cbaoi_aug_i, cbaoi)
def test_polygons_rot90_without_fit_output(self):
psoi = ia.PolygonsOnImage([
ia.Polygon([(10, 10), (20, 10), (20, 20)])
], shape=(100, 200, 3))
self._test_cbaoi_rot90_without_fit_output(psoi, "augment_polygons")
def test_line_strings_rot90_without_fit_output(self):
lsoi = ia.LineStringsOnImage([
ia.LineString([(10, 10), (20, 10), (20, 20), (10, 10)])
], shape=(100, 200, 3))
self._test_cbaoi_rot90_without_fit_output(lsoi, "augment_line_strings")
def _test_cbaoi_rot90(self, cbaoi, expected, augf_name):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
cbaoi_aug = getattr(aug, augf_name)([cbaoi, cbaoi])
assert len(cbaoi_aug) == 2
for cbaoi_aug_i in cbaoi_aug:
assert_cbaois_equal(cbaoi_aug_i, expected)
def test_polygons_rot90(self):
psoi = ia.PolygonsOnImage([
ia.Polygon([(10, 10), (20, 10), (20, 20)])
], shape=(100, 200, 3))
expected = ia.PolygonsOnImage([
ia.Polygon([(100-10-1, 10), (100-10-1, 20), (100-20-1, 20)])
], shape=(200, 100, 3))
self._test_cbaoi_rot90(psoi, expected, "augment_polygons")
def test_line_strings_rot90(self):
lsoi = ia.LineStringsOnImage([
ia.LineString([(10, 10), (20, 10), (20, 20), (10, 10)])
], shape=(100, 200, 3))
expected = ia.LineStringsOnImage([
ia.LineString([(100-10-1, 10), (100-10-1, 20), (100-20-1, 20),
(100-10-1, 10)])
], shape=(200, 100, 3))
self._test_cbaoi_rot90(lsoi, expected, "augment_line_strings")
def test_bounding_boxes_rot90(self):
lsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=10, y1=10, x2=20, y2=20)
], shape=(100, 200, 3))
expected = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=100-20-1, y1=10, x2=100-10-1, y2=20)
], shape=(200, 100, 3))
self._test_cbaoi_rot90(lsoi, expected, "augment_bounding_boxes")
def _test_empty_cbaoi_rot90(self, cbaoi, expected, augf_name):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
cbaoi_aug = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(cbaoi_aug, expected)
def test_empty_polygons_rot90(self):
psoi = ia.PolygonsOnImage([], shape=(100, 200, 3))
expected = ia.PolygonsOnImage([], shape=(200, 100, 3))
self._test_empty_cbaoi_rot90(psoi, expected, "augment_polygons")
def test_empty_line_strings_rot90(self):
lsoi = ia.LineStringsOnImage([], shape=(100, 200, 3))
expected = ia.LineStringsOnImage([], shape=(200, 100, 3))
self._test_empty_cbaoi_rot90(lsoi, expected, "augment_line_strings")
def test_empty_bounding_boxes_rot90(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(100, 200, 3))
expected = ia.BoundingBoxesOnImage([], shape=(200, 100, 3))
self._test_empty_cbaoi_rot90(bbsoi, expected, "augment_bounding_boxes")
# TODO merge these into TestAffine_rotate since they are rotations?
# or extend to contain other affine params too?
class TestAffine_alignment(unittest.TestCase):
def setUp(self):
reseed()
def test_image_keypoint_alignment(self):
aug = iaa.Affine(rotate=[0, 180], order=0)
img = np.zeros((10, 10), dtype=np.uint8)
img[0:5, 5] = 255
img[2, 4:6] = 255
img_rot = [np.copy(img), np.copy(np.flipud(np.fliplr(img)))]
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=5, y=2)], shape=img.shape)
kpsoi_rot = [(5, 2), (5, 10-2)]
img_aug_indices = []
kpsois_aug_indices = []
for _ in sm.xrange(40):
aug_det = aug.to_deterministic()
imgs_aug = aug_det.augment_images([img, img])
kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])
assert kpsois_aug[0].shape == img.shape
assert kpsois_aug[1].shape == img.shape
for img_aug in imgs_aug:
if np.array_equal(img_aug, img_rot[0]):
img_aug_indices.append(0)
elif np.array_equal(img_aug, img_rot[1]):
img_aug_indices.append(1)
else:
assert False
for kpsoi_aug in kpsois_aug:
similar_to_rot_0 = np.allclose(
[kpsoi_aug.keypoints[0].x, kpsoi_aug.keypoints[0].y],
kpsoi_rot[0])
similar_to_rot_180 = np.allclose(
[kpsoi_aug.keypoints[0].x, kpsoi_aug.keypoints[0].y],
kpsoi_rot[1])
if similar_to_rot_0:
kpsois_aug_indices.append(0)
elif similar_to_rot_180:
kpsois_aug_indices.append(1)
else:
assert False
assert np.array_equal(img_aug_indices, kpsois_aug_indices)
assert len(set(img_aug_indices)) == 2
assert len(set(kpsois_aug_indices)) == 2
@classmethod
def _test_image_cbaoi_alignment(cls, cbaoi, cbaoi_rot, augf_name):
aug = iaa.Affine(rotate=[0, 180], order=0)
img = np.zeros((10, 10), dtype=np.uint8)
img[0:5, 5] = 255
img[2, 4:6] = 255
img_rot = [np.copy(img), np.copy(np.flipud(np.fliplr(img)))]
img_aug_indices = []
cbaois_aug_indices = []
for _ in sm.xrange(40):
aug_det = aug.to_deterministic()
imgs_aug = aug_det.augment_images([img, img])
cbaois_aug = getattr(aug_det, augf_name)([cbaoi, cbaoi])
assert cbaois_aug[0].shape == img.shape
assert cbaois_aug[1].shape == img.shape
if hasattr(cbaois_aug[0].items[0], "is_valid"):
assert cbaois_aug[0].items[0].is_valid
assert cbaois_aug[1].items[0].is_valid
for img_aug in imgs_aug:
if np.array_equal(img_aug, img_rot[0]):
img_aug_indices.append(0)
elif np.array_equal(img_aug, img_rot[1]):
img_aug_indices.append(1)
else:
assert False
for cbaoi_aug in cbaois_aug:
if cbaoi_aug.items[0].coords_almost_equals(cbaoi_rot[0]):
cbaois_aug_indices.append(0)
elif cbaoi_aug.items[0].coords_almost_equals(cbaoi_rot[1]):
cbaois_aug_indices.append(1)
else:
assert False
assert np.array_equal(img_aug_indices, cbaois_aug_indices)
assert len(set(img_aug_indices)) == 2
assert len(set(cbaois_aug_indices)) == 2
def test_image_polygon_alignment(self):
psoi = ia.PolygonsOnImage([ia.Polygon([(1, 1), (9, 1), (5, 5)])],
shape=(10, 10))
psoi_rot = [
psoi.polygons[0].deepcopy(),
ia.Polygon([(10-1, 10-1), (10-9, 10-1), (10-5, 10-5)])
]
self._test_image_cbaoi_alignment(psoi, psoi_rot,
"augment_polygons")
def test_image_line_string_alignment(self):
lsoi = ia.LineStringsOnImage([ia.LineString([(1, 1), (9, 1), (5, 5)])],
shape=(10, 10))
lsoi_rot = [
lsoi.items[0].deepcopy(),
ia.LineString([(10-1, 10-1), (10-9, 10-1), (10-5, 10-5)])
]
self._test_image_cbaoi_alignment(lsoi, lsoi_rot,
"augment_line_strings")
def test_image_bounding_box_alignment(self):
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=1, y1=1, x2=9, y2=5)], shape=(10, 10))
bbsoi_rot = [
bbsoi.items[0].deepcopy(),
ia.BoundingBox(x1=10-9, y1=10-5, x2=10-1, y2=10-1)]
self._test_image_cbaoi_alignment(bbsoi, bbsoi_rot,
"augment_bounding_boxes")
class TestAffine_other_dtypes(unittest.TestCase):
@property
def translate_mask(self):
mask = np.zeros((3, 3), dtype=bool)
mask[1, 2] = True
return mask
@property
def image(self):
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
return image
@property
def rot_mask_inner(self):
img_flipped = iaa.Fliplr(1.0)(image=self.image)
return img_flipped == 1
@property
def rot_mask_outer(self):
img_flipped = iaa.Fliplr(1.0)(image=self.image)
return img_flipped == 0
@property
def rot_thresh_inner(self):
return 0.9
@property
def rot_thresh_outer(self):
return 0.9
def rot_thresh_inner_float(self, order):
return 0.85 if order == 1 else 0.7
def rot_thresh_outer_float(self, order):
return 0.85 if order == 1 else 0.4
def test_translate_skimage_order_0_bool(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="skimage")
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == 1)
def test_translate_skimage_order_0_uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
for dtype in dtypes:
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="skimage")
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == value)
def test_translate_skimage_order_0_float(self):
# float
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="skimage")
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(_isclose(image_aug[~self.translate_mask], 0))
assert np.all(_isclose(image_aug[self.translate_mask],
np.float128(value)))
def test_rotate_skimage_order_not_0_bool(self):
# skimage, order!=0 and rotate=180
for order in [1, 3, 4, 5]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="skimage")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert (
np.sum(image_aug == image_exp)/image.size
) > self.rot_thresh_inner
def test_rotate_skimage_order_not_0_uint_int(self):
def _compute_matching(image_aug, image_exp, mask):
return np.sum(
np.isclose(image_aug[mask], image_exp[mask], rtol=0,
atol=1.001)
) / np.sum(mask)
dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
for dtype in dtypes:
for order in [1, 3, 4, 5]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="skimage")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
with self.subTest(dtype=dtype, order=order, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert _compute_matching(
image_aug, image_exp, self.rot_mask_inner
) > self.rot_thresh_inner
assert _compute_matching(
image_aug, image_exp, self.rot_mask_outer
) > self.rot_thresh_outer
def test_rotate_skimage_order_not_0_float(self):
def _compute_matching(image_aug, image_exp, mask):
return np.sum(
_isclose(image_aug[mask], image_exp[mask])
) / np.sum(mask)
for order in [1, 3, 4, 5]:
dtypes = ["float16", "float32", "float64"]
if order == 5:
# float64 caused too many interpolation inaccuracies for
# order=5, not wrong but harder to test
dtypes = ["float16", "float32"]
for dtype in dtypes:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="skimage")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
if order not in [0, 1]:
atol = 1e-2
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
if order not in [3, 4]: # results in NaNs otherwise
values = values + [min_value, max_value]
for value in values:
with self.subTest(order=order, dtype=dtype, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert _compute_matching(
image_aug, image_exp, self.rot_mask_inner
) > self.rot_thresh_inner_float(order)
assert _compute_matching(
image_aug, image_exp, self.rot_mask_outer
) > self.rot_thresh_outer_float(order)
def test_translate_cv2_order_0_bool(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="cv2")
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == 1)
def test_translate_cv2_order_0_uint_int(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="cv2")
dtypes = ["uint8", "uint16", "int8", "int16", "int32"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == value)
def test_translate_cv2_order_0_float(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="cv2")
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(_isclose(image_aug[~self.translate_mask], 0))
assert np.all(_isclose(image_aug[self.translate_mask],
np.float128(value)))
def test_rotate_cv2_order_1_and_3_bool(self):
# cv2, order=1 and rotate=180
for order in [1, 3]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="cv2")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert (np.sum(image_aug == image_exp) / image.size) > 0.9
def test_rotate_cv2_order_1_and_3_uint_int(self):
# cv2, order=1 and rotate=180
for order in [1, 3]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="cv2")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
dtypes = ["uint8", "uint16", "int8", "int16"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
with self.subTest(order=order, dtype=dtype, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert (
np.sum(image_aug == image_exp) / image.size
) > 0.9
def test_rotate_cv2_order_1_and_3_float(self):
# cv2, order=1 and rotate=180
for order in [1, 3]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="cv2")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(order=order, dtype=dtype, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert (
np.sum(_isclose(image_aug, image_exp)) / image.size
) > 0.9
class TestAffine_other(unittest.TestCase):
def test_unusual_channel_numbers(self):
nb_channels_lst = [4, 5, 512, 513]
orders = [0, 1, 3]
backends = ["auto", "skimage", "cv2"]
for nb_channels, order, backend in itertools.product(nb_channels_lst,
orders, backends):
with self.subTest(nb_channels=nb_channels, order=order,
backend=backend):
aug = iaa.Affine(translate_px={"x": -1}, mode="constant",
cval=255, order=order, backend=backend)
image = np.full((3, 3, nb_channels), 128, dtype=np.uint8)
heatmap_arr = np.full((3, 3, nb_channels), 0.5,
dtype=np.float32)
heatmap = ia.HeatmapsOnImage(heatmap_arr, shape=image.shape)
image_aug, heatmap_aug = aug(image=image, heatmaps=heatmap)
hm_aug_arr = heatmap_aug.arr_0to1
assert image_aug.shape == (3, 3, nb_channels)
assert heatmap_aug.arr_0to1.shape == (3, 3, nb_channels)
assert heatmap_aug.shape == image.shape
assert np.allclose(image_aug[:, 0:2, :], 128, rtol=0, atol=2)
assert np.allclose(image_aug[:, 2:3, 0:3], 255, rtol=0, atol=2)
assert np.allclose(image_aug[:, 2:3, 3:], 255, rtol=0, atol=2)
assert np.allclose(hm_aug_arr[:, 0:2, :], 0.5, rtol=0,
atol=0.025)
assert np.allclose(hm_aug_arr[:, 2:3, :], 0.0, rtol=0,
atol=0.025)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 1),
(1, 0, 1)
]
for fit_output in [False, True]:
for shape in shapes:
with self.subTest(shape=shape, fit_output=fit_output):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Affine(rotate=45, fit_output=fit_output)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# TODO migrate to unittest and split up tests or remove AffineCv2
def test_AffineCv2():
reseed()
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)]
keypoints = [ia.KeypointsOnImage(kps, shape=base_img.shape)]
# no translation/scale/rotate/shear, shouldnt change nothing
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# ---------------------
# scale
# ---------------------
# zoom in
aug = iaa.AffineCv2(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
# zoom in only on x axis
aug = iaa.AffineCv2(scale={"x": 1.75, "y": 1.0}, translate_px=0,
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
# zoom in only on y axis
aug = iaa.AffineCv2(scale={"x": 1.0, "y": 1.75}, translate_px=0,
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
# zoom out
# this one uses a 4x4 area of all 255, which is zoomed out to a 4x4 area
# in which the center 2x2 area is 255
# zoom in should probably be adapted to this style
# no separate tests here for x/y axis, should work fine if zoom in
# works with that
aug = iaa.AffineCv2(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.ones((4, 4, 1), dtype=np.uint8) * 255
images = np.array([image])
images_list = [image]
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
inner_pixels = ([1, 1, 2, 2], [1, 2, 1, 2])
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),
ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)]
keypoints = [ia.KeypointsOnImage(kps, shape=image.shape)]
kps_aug = [ia.Keypoint(x=0.765, y=0.765), ia.Keypoint(x=2.235, y=0.765),
ia.Keypoint(x=0.765, y=2.235), ia.Keypoint(x=2.235, y=2.235)]
keypoints_aug = [ia.KeypointsOnImage(kps_aug, shape=image.shape)]
observed = aug.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# varying scales
aug = iaa.AffineCv2(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images = np.array([image])
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
aug = iaa.AffineCv2(scale=iap.Uniform(0.7, 0.9))
assert isinstance(aug.scale, iap.Uniform)
assert isinstance(aug.scale.a, iap.Deterministic)
assert isinstance(aug.scale.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8
# ---------------------
# translate
# ---------------------
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=ALL
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0, order=ia.ALL)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=list
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0, order=[0, 1, 2])
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0, order=iap.Choice([0, 1, 2]))
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the bottom
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 0, "y": 1},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the right
aug = iaa.AffineCv2(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the bottom
aug = iaa.AffineCv2(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
centers_aug = np.copy(image).astype(np.int32) * 0
centers_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert len(observed_aug[0].nonzero()[0]) == 1
assert len(observed_aug_det[0].nonzero()[0]) == 1
centers_aug += (observed_aug[0] > 0)
centers_aug_det += (observed_aug_det[0] > 0)
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
aug = iaa.AffineCv2(translate_percent=iap.Uniform(0.7, 0.9))
assert isinstance(aug.translate, iap.Uniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.translate.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.translate.b.value < 0.9 + 1e-8
aug = iaa.AffineCv2(translate_px=iap.DiscreteUniform(1, 10))
assert isinstance(aug.translate, iap.DiscreteUniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert aug.translate.a.value == 1
assert aug.translate.b.value == 10
# ---------------------
# translate heatmaps
# ---------------------
heatmaps = HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
arr_expected_1px_right = np.float32([
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.75, 0.75],
])
aug = iaa.AffineCv2(translate_px={"x": 1})
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert np.isclose(observed.min_value, heatmaps.min_value, rtol=0, atol=1e-6)
assert np.isclose(observed.max_value, heatmaps.max_value, rtol=0, atol=1e-6)
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.AffineCv2(translate_px={"x": 1}, cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert np.isclose(observed.min_value, heatmaps.min_value, rtol=0, atol=1e-6)
assert np.isclose(observed.max_value, heatmaps.max_value, rtol=0, atol=1e-6)
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
aug = iaa.AffineCv2(translate_px={"x": 1}, mode="replicate", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert np.isclose(observed.min_value, heatmaps.min_value, rtol=0, atol=1e-6)
assert np.isclose(observed.max_value, heatmaps.max_value, rtol=0, atol=1e-6)
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# ---------------------
# translate segmaps
# ---------------------
segmaps = SegmentationMapsOnImage(
np.int32([
[0, 1, 2],
[0, 1, 2],
[2, 2, 2],
]),
shape=(3, 3, 3)
)
arr_expected_1px_right = np.int32([
[0, 0, 1],
[0, 0, 1],
[0, 2, 2],
])
aug = iaa.AffineCv2(translate_px={"x": 1})
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == segmaps.shape
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.AffineCv2(translate_px={"x": 1}, cval=255)
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == segmaps.shape
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
aug = iaa.AffineCv2(translate_px={"x": 1}, mode="replicate", cval=255)
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == segmaps.shape
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# ---------------------
# rotate
# ---------------------
# rotate by 45 degrees
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, :] = 255
image_aug[0, 1] = 255
image_aug[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)]
keypoints = [ia.KeypointsOnImage(kps, shape=base_img.shape)]
kps_aug = [ia.Keypoint(x=1, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=1, y=2)]
keypoints_aug = [ia.KeypointsOnImage(kps_aug, shape=base_img.shape)]
observed = aug.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# rotate by StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px=0,
rotate=iap.Uniform(10, 20), shear=0)
assert isinstance(aug.rotate, iap.Uniform)
assert isinstance(aug.rotate.a, iap.Deterministic)
assert aug.rotate.a.value == 10
assert isinstance(aug.rotate.b, iap.Deterministic)
assert aug.rotate.b.value == 20
# random rotation 0-364 degrees
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
pixels_sums_aug = np.copy(image).astype(np.int32) * 0
pixels_sums_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
pixels_sums_aug += (observed_aug[0] > 100)
pixels_sums_aug_det += (observed_aug_det[0] > 100)
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
# outer pixels, should sometimes be white
# the values here had to be set quite tolerant, the middle pixels at
# top/left/bottom/right get more activation than expected
outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2])
assert (
pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))
).all()
assert (
pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))
).all()
# ---------------------
# shear
# ---------------------
# TODO
# shear by StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0,
shear=iap.Uniform(10, 20))
assert isinstance(aug.shear, iap.Uniform)
assert isinstance(aug.shear.a, iap.Deterministic)
assert aug.shear.a.value == 10
assert isinstance(aug.shear.b, iap.Deterministic)
assert aug.shear.b.value == 20
# ---------------------
# cval
# ---------------------
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=128)
aug_det = aug.to_deterministic()
image = np.ones((3, 3, 1), dtype=np.uint8) * 255
image_aug = np.copy(image)
images = np.array([image])
images_list = [image]
observed = aug.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
# random cvals
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=(0, 255))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
averages = []
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
averages.append(int(np.average(observed_aug)))
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
assert len(set(averages)) > 200
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=ia.ALL)
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=iap.DiscreteUniform(1, 5))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 1
assert aug.cval.b.value == 5
# ------------
# mode
# ------------
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode="replicate")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "replicate"
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=["replicate", "reflect"])
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "reflect" in aug.mode.a)
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0,
mode=iap.Choice(["replicate", "reflect"]))
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "reflect" in aug.mode.a)
# ------------
# exceptions for bad inputs
# ------------
# scale
got_exception = False
try:
_ = iaa.AffineCv2(scale=False)
except Exception:
got_exception = True
assert got_exception
# translate_px
got_exception = False
try:
_ = iaa.AffineCv2(translate_px=False)
except Exception:
got_exception = True
assert got_exception
# translate_percent
got_exception = False
try:
_ = iaa.AffineCv2(translate_percent=False)
except Exception:
got_exception = True
assert got_exception
# rotate
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=False,
shear=0, cval=0)
except Exception:
got_exception = True
assert got_exception
# shear
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0,
shear=False, cval=0)
except Exception:
got_exception = True
assert got_exception
# cval
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0,
shear=0, cval=None)
except Exception:
got_exception = True
assert got_exception
# mode
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0,
shear=0, cval=0, mode=False)
except Exception:
got_exception = True
assert got_exception
# non-existent order
got_exception = False
try:
_ = iaa.AffineCv2(order=-1)
except Exception:
got_exception = True
assert got_exception
# bad order datatype
got_exception = False
try:
_ = iaa.AffineCv2(order="test")
except Exception:
got_exception = True
assert got_exception
# ----------
# get_parameters
# ----------
aug = iaa.AffineCv2(scale=1, translate_px=2, rotate=3, shear=4,
order=1, cval=0, mode="constant")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic) # scale
assert isinstance(params[1], iap.Deterministic) # translate
assert isinstance(params[2], iap.Deterministic) # rotate
assert isinstance(params[3], iap.Deterministic) # shear
assert params[0].value == 1 # scale
assert params[1].value == 2 # translate
assert params[2].value == 3 # rotate
assert params[3].value == 4 # shear
assert params[4].value == 1 # order
assert params[5].value == 0 # cval
assert params[6].value == "constant" # mode
class TestPiecewiseAffine(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
img = np.zeros((60, 80), dtype=np.uint8)
img[:, 9:11+1] = 255
img[:, 69:71+1] = 255
return img
@property
def mask(self):
return self.image > 0
@property
def heatmaps(self):
return HeatmapsOnImage((self.image / 255.0).astype(np.float32),
shape=(60, 80, 3))
@property
def segmaps(self):
return SegmentationMapsOnImage(self.mask.astype(np.int32),
shape=(60, 80, 3))
# -----
# __init__
# -----
def test___init___scale_is_list(self):
# scale as list
aug = iaa.PiecewiseAffine(scale=[0.01, 0.10], nb_rows=12, nb_cols=4)
assert isinstance(aug.scale, iap.Choice)
assert 0.01 - 1e-8 < aug.scale.a[0] < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.scale.a[1] < 0.10 + 1e-8
def test___init___scale_is_tuple(self):
# scale as tuple
aug = iaa.PiecewiseAffine(scale=(0.01, 0.10), nb_rows=12, nb_cols=4)
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8
def test___init___scale_is_stochastic_parameter(self):
# scale as StochasticParameter
aug = iaa.PiecewiseAffine(scale=iap.Uniform(0.01, 0.10), nb_rows=12,
nb_cols=4)
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8
def test___init___bad_datatype_for_scale_leads_to_failure(self):
# bad datatype for scale
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=False, nb_rows=12, nb_cols=4)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___nb_rows_is_list(self):
# rows as list
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=[4, 20], nb_cols=4)
assert isinstance(aug.nb_rows, iap.Choice)
assert aug.nb_rows.a[0] == 4
assert aug.nb_rows.a[1] == 20
def test___init___nb_rows_is_tuple(self):
# rows as tuple
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=(4, 20), nb_cols=4)
assert isinstance(aug.nb_rows, iap.DiscreteUniform)
assert isinstance(aug.nb_rows.a, iap.Deterministic)
assert isinstance(aug.nb_rows.b, iap.Deterministic)
assert aug.nb_rows.a.value == 4
assert aug.nb_rows.b.value == 20
def test___init___nb_rows_is_stochastic_parameter(self):
# rows as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=iap.DiscreteUniform(4, 20),
nb_cols=4)
assert isinstance(aug.nb_rows, iap.DiscreteUniform)
assert isinstance(aug.nb_rows.a, iap.Deterministic)
assert isinstance(aug.nb_rows.b, iap.Deterministic)
assert aug.nb_rows.a.value == 4
assert aug.nb_rows.b.value == 20
def test___init___bad_datatype_for_nb_rows_leads_to_failure(self):
# bad datatype for rows
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.05, nb_rows=False, nb_cols=4)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___nb_cols_is_list(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=[4, 20])
assert isinstance(aug.nb_cols, iap.Choice)
assert aug.nb_cols.a[0] == 4
assert aug.nb_cols.a[1] == 20
def test___init___nb_cols_is_tuple(self):
# cols as tuple
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=(4, 20))
assert isinstance(aug.nb_cols, iap.DiscreteUniform)
assert isinstance(aug.nb_cols.a, iap.Deterministic)
assert isinstance(aug.nb_cols.b, iap.Deterministic)
assert aug.nb_cols.a.value == 4
assert aug.nb_cols.b.value == 20
def test___init___nb_cols_is_stochastic_parameter(self):
# cols as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4,
nb_cols=iap.DiscreteUniform(4, 20))
assert isinstance(aug.nb_cols, iap.DiscreteUniform)
assert isinstance(aug.nb_cols.a, iap.Deterministic)
assert isinstance(aug.nb_cols.b, iap.Deterministic)
assert aug.nb_cols.a.value == 4
assert aug.nb_cols.b.value == 20
def test___init___bad_datatype_for_nb_cols_leads_to_failure(self):
# bad datatype for cols
got_exception = False
try:
_aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___order_is_int(self):
# single int for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=0)
assert isinstance(aug.order, iap.Deterministic)
assert aug.order.value == 0
def test___init___order_is_list(self):
# list for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=[0, 1, 3])
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3]])
def test___init___order_is_stochastic_parameter(self):
# StochasticParameter for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=iap.Choice([0, 1, 3]))
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3]])
def test___init___order_is_all(self):
# ALL for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=ia.ALL)
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3, 4, 5]])
def test___init___bad_datatype_for_order_leads_to_failure(self):
# bad datatype for order
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___cval_is_list(self):
# cval as list
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=5, nb_cols=5,
mode="constant", cval=[0, 10])
assert isinstance(aug.cval, iap.Choice)
assert aug.cval.a[0] == 0
assert aug.cval.a[1] == 10
def test___init___cval_is_tuple(self):
# cval as tuple
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="constant", cval=(0, 10))
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 10
def test___init___cval_is_stochastic_parameter(self):
# cval as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="constant",
cval=iap.DiscreteUniform(0, 10))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 10
def test___init___cval_is_all(self):
# ALL as cval
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="constant", cval=ia.ALL)
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
def test___init___bad_datatype_for_cval_leads_to_failure(self):
# bas datatype for cval
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, cval=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___mode_is_string(self):
# single string for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="nearest")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "nearest"
def test___init___mode_is_list(self):
# list for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode=["nearest", "edge", "symmetric"])
assert isinstance(aug.mode, iap.Choice)
assert all([
v in aug.mode.a for v in ["nearest", "edge", "symmetric"]
])
def test___init___mode_is_stochastic_parameter(self):
# StochasticParameter for mode
aug = iaa.PiecewiseAffine(
scale=0.1, nb_rows=8, nb_cols=8,
mode=iap.Choice(["nearest", "edge", "symmetric"]))
assert isinstance(aug.mode, iap.Choice)
assert all([
v in aug.mode.a for v in ["nearest", "edge", "symmetric"]
])
def test___init___mode_is_all(self):
# ALL for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
assert all([
v in aug.mode.a
for v
in ["constant", "edge", "symmetric", "reflect", "wrap"]
])
def test___init___bad_datatype_for_mode_leads_to_failure(self):
# bad datatype for mode
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# scale
# -----
def test_scale_is_small_image(self):
# basic test
aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
observed = aug.augment_image(self.image)
assert (
100.0
< np.average(observed[self.mask])
< np.average(self.image[self.mask])
)
assert (
100.0-75.0
> np.average(observed[~self.mask])
> np.average(self.image[~self.mask])
)
def test_scale_is_small_image_absolute_scale(self):
aug = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed = aug.augment_image(self.image)
assert (
100.0
< np.average(observed[self.mask])
< np.average(self.image[self.mask])
)
assert (
100.0-75.0
> np.average(observed[~self.mask])
> np.average(self.image[~self.mask])
)
def test_scale_is_small_heatmaps(self):
# basic test, heatmaps
aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
observed = aug.augment_heatmaps([self.heatmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
assert (
100.0/255.0
< np.average(observed_arr[self.mask])
< np.average(self.heatmaps.get_arr()[self.mask]))
assert (
(100.0-75.0)/255.0
> np.average(observed_arr[~self.mask])
> np.average(self.heatmaps.get_arr()[~self.mask]))
def test_scale_is_small_segmaps(self):
# basic test, segmaps
aug = iaa.PiecewiseAffine(scale=0.001, nb_rows=12, nb_cols=4)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
observed_arr = observed.get_arr()
# left column starts at 9-11 and right one at 69-71
# result is 9-11 (curvy, i.e. like 50% filled) and 70-71 (straight,
# i.e. 100% filled). Reason for that is unclear, maybe a scikit-image
# problem.
observed_arr_left_col = observed_arr[:, 9:11+1]
observed_arr_right_col = observed_arr[:, 69:71+1]
assert observed.shape == self.segmaps.shape
assert np.average(observed_arr_left_col == 1) > 0.5
assert np.average(observed_arr_right_col == 1) > 0.5
assert np.average(observed_arr[~self.mask] == 0) > 0.9
def test_scale_is_zero_image(self):
# scale 0
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_image(self.image)
assert np.array_equal(observed, self.image)
def test_scale_is_zero_image_absolute_scale(self):
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed = aug.augment_image(self.image)
assert np.array_equal(observed, self.image)
def test_scale_is_zero_heatmaps(self):
# scale 0, heatmaps
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_heatmaps([self.heatmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed_arr, self.heatmaps.get_arr())
def test_scale_is_zero_segmaps(self):
# scale 0, segmaps
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == self.segmaps.shape
assert np.array_equal(observed_arr, self.segmaps.get_arr())
def test_scale_is_zero_keypoints(self):
# scale 0, keypoints
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
kps = [ia.Keypoint(x=5, y=3), ia.Keypoint(x=3, y=8)]
kpsoi = ia.KeypointsOnImage(kps, shape=(14, 14, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, kpsoi)
@classmethod
def _test_scale_is_zero_cbaoi(cls, cbaoi, augf_name):
aug = iaa.PiecewiseAffine(scale=0, nb_rows=10, nb_cols=10)
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi)
def test_scale_is_zero_polygons(self):
exterior = [(10, 10),
(70, 10), (70, 20), (70, 30), (70, 40),
(70, 50), (70, 60), (70, 70), (70, 80),
(70, 90),
(10, 90),
(10, 80), (10, 70), (10, 60), (10, 50),
(10, 40), (10, 30), (10, 20), (10, 10)]
poly = ia.Polygon(exterior)
psoi = ia.PolygonsOnImage([poly, poly.shift(left=1, top=1)],
shape=(100, 80))
self._test_scale_is_zero_cbaoi(psoi, "augment_polygons")
def test_scale_is_zero_line_strings(self):
coords = [(10, 10),
(70, 10), (70, 20), (70, 30), (70, 40),
(70, 50), (70, 60), (70, 70), (70, 80),
(70, 90),
(10, 90),
(10, 80), (10, 70), (10, 60), (10, 50),
(10, 40), (10, 30), (10, 20), (10, 10)]
ls = ia.LineString(coords)
lsoi = ia.LineStringsOnImage([ls, ls.shift(left=1, top=1)],
shape=(100, 80))
self._test_scale_is_zero_cbaoi(lsoi, "augment_line_strings")
def test_scale_is_zero_bounding_boxes(self):
bb = ia.BoundingBox(x1=10, y1=10, x2=70, y2=20)
bbsoi = ia.BoundingBoxesOnImage([bb, bb.shift(left=1, top=1)],
shape=(100, 80))
self._test_scale_is_zero_cbaoi(bbsoi, "augment_bounding_boxes")
def test_scale_stronger_values_should_increase_changes_images(self):
# stronger scale should lead to stronger changes
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_image(self.image)
observed2 = aug2.augment_image(self.image)
assert (
np.average(observed1[~self.mask])
< np.average(observed2[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_images_abs(self):
aug1 = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,
absolute_scale=True)
aug2 = iaa.PiecewiseAffine(scale=10, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed1 = aug1.augment_image(self.image)
observed2 = aug2.augment_image(self.image)
assert (
np.average(observed1[~self.mask])
< np.average(observed2[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_heatmaps(self):
# stronger scale should lead to stronger changes, heatmaps
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_heatmaps([self.heatmaps])[0]
observed2 = aug2.augment_heatmaps([self.heatmaps])[0]
observed1_arr = observed1.get_arr()
observed2_arr = observed2.get_arr()
assert observed1.shape == self.heatmaps.shape
assert observed2.shape == self.heatmaps.shape
_assert_same_min_max(observed1, self.heatmaps)
_assert_same_min_max(observed2, self.heatmaps)
assert (
np.average(observed1_arr[~self.mask])
< np.average(observed2_arr[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_heatmaps_abs(self):
aug1 = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,
absolute_scale=True)
aug2 = iaa.PiecewiseAffine(scale=10, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed1 = aug1.augment_heatmaps([self.heatmaps])[0]
observed2 = aug2.augment_heatmaps([self.heatmaps])[0]
observed1_arr = observed1.get_arr()
observed2_arr = observed2.get_arr()
assert observed1.shape == self.heatmaps.shape
assert observed2.shape == self.heatmaps.shape
_assert_same_min_max(observed1, self.heatmaps)
_assert_same_min_max(observed2, self.heatmaps)
assert (
np.average(observed1_arr[~self.mask])
< np.average(observed2_arr[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_segmaps(self):
# stronger scale should lead to stronger changes, segmaps
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_segmentation_maps([self.segmaps])[0]
observed2 = aug2.augment_segmentation_maps([self.segmaps])[0]
observed1_arr = observed1.get_arr()
observed2_arr = observed2.get_arr()
assert observed1.shape == self.segmaps.shape
assert observed2.shape == self.segmaps.shape
assert (
np.average(observed1_arr[~self.mask] == 0)
> np.average(observed2_arr[~self.mask] == 0)
)
def test_scale_alignment_between_images_and_heatmaps(self):
# strong scale, measure alignment between images and heatmaps
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(self.image)
hm_aug = aug_det.augment_heatmaps([self.heatmaps])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (60, 80, 3)
_assert_same_min_max(hm_aug, self.heatmaps)
assert (same / img_aug_mask.size) >= 0.98
def test_scale_alignment_between_images_and_segmaps(self):
# strong scale, measure alignment between images and segmaps
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(self.image)
segmap_aug = aug_det.augment_segmentation_maps([self.segmaps])[0]
img_aug_mask = (img_aug > 255*0.1)
segmap_aug_mask = (segmap_aug.arr == 1)
same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])
assert segmap_aug.shape == (60, 80, 3)
assert (same / img_aug_mask.size) >= 0.9
def test_scale_alignment_between_images_and_smaller_heatmaps(self):
# strong scale, measure alignment between images and heatmaps
# heatmaps here smaller than image
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
heatmaps_small = ia.HeatmapsOnImage(
(
ia.imresize_single_image(
self.image, (30, 40+10), interpolation="cubic"
) / 255.0
).astype(np.float32),
shape=(60, 80, 3)
)
img_aug = aug_det.augment_image(self.image)
hm_aug = aug_det.augment_heatmaps([heatmaps_small])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, (60, 80), interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (60, 80, 3)
assert hm_aug.arr_0to1.shape == (30, 40+10, 1)
assert (same / img_aug_mask.size) >= 0.9 # seems to be 0.948 actually
def test_scale_alignment_between_images_and_smaller_heatmaps_abs(self):
# image is 60x80, so a scale of 8 is about 0.1*max(60,80)
aug = iaa.PiecewiseAffine(scale=8, nb_rows=12, nb_cols=4,
absolute_scale=True)
aug_det = aug.to_deterministic()
heatmaps_small = ia.HeatmapsOnImage(
(
ia.imresize_single_image(
self.image, (30, 40+10), interpolation="cubic"
) / 255.0
).astype(np.float32),
shape=(60, 80, 3)
)
img_aug = aug_det.augment_image(self.image)
hm_aug = aug_det.augment_heatmaps([heatmaps_small])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, (60, 80), interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (60, 80, 3)
assert hm_aug.arr_0to1.shape == (30, 40+10, 1)
assert (same / img_aug_mask.size) >= 0.9 # seems to be 0.930 actually
def test_scale_alignment_between_images_and_smaller_segmaps(self):
# strong scale, measure alignment between images and segmaps
# segmaps here smaller than image
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
segmaps_small = SegmentationMapsOnImage(
(
ia.imresize_single_image(
self.image, (30, 40+10), interpolation="cubic"
) > 100
).astype(np.int32),
shape=(60, 80, 3)
)
img_aug = aug_det.augment_image(self.image)
segmaps_aug = aug_det.augment_segmentation_maps([segmaps_small])[0]
img_aug_mask = img_aug > 255*0.1
segmaps_aug_mask = (
ia.imresize_single_image(
segmaps_aug.arr, (60, 80),
interpolation="nearest"
) == 1
)
same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
assert segmaps_aug.shape == (60, 80, 3)
assert segmaps_aug.arr.shape == (30, 40+10, 1)
assert (same / img_aug_mask.size) >= 0.9
def test_scale_alignment_between_images_and_keypoints(self):
# strong scale, measure alignment between images and keypoints
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
kps = [ia.Keypoint(x=5, y=15), ia.Keypoint(x=17, y=12)]
kpsoi = ia.KeypointsOnImage(kps, shape=(24, 30, 3))
img_kps = np.zeros((24, 30, 3), dtype=np.uint8)
img_kps = kpsoi.draw_on_image(img_kps, color=[255, 255, 255])
img_kps_aug = aug_det.augment_image(img_kps)
kpsoi_aug = aug_det.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (24, 30, 3)
bb1 = ia.BoundingBox(
x1=kpsoi_aug.keypoints[0].x-1, y1=kpsoi_aug.keypoints[0].y-1,
x2=kpsoi_aug.keypoints[0].x+1, y2=kpsoi_aug.keypoints[0].y+1)
bb2 = ia.BoundingBox(
x1=kpsoi_aug.keypoints[1].x-1, y1=kpsoi_aug.keypoints[1].y-1,
x2=kpsoi_aug.keypoints[1].x+1, y2=kpsoi_aug.keypoints[1].y+1)
patch1 = bb1.extract_from_image(img_kps_aug)
patch2 = bb2.extract_from_image(img_kps_aug)
assert np.max(patch1) > 150
assert np.max(patch2) > 150
assert np.average(img_kps_aug) < 40
# this test was apparently added later on (?) without noticing that
# a similar test already existed
def test_scale_alignment_between_images_and_keypoints2(self):
img = np.zeros((100, 80), dtype=np.uint8)
img[:, 9:11+1] = 255
img[:, 69:71+1] = 255
kps = [ia.Keypoint(x=10, y=20), ia.Keypoint(x=10, y=40),
ia.Keypoint(x=70, y=20), ia.Keypoint(x=70, y=40)]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
aug_det = aug.to_deterministic()
observed_img = aug_det.augment_image(img)
observed_kpsoi = aug_det.augment_keypoints([kpsoi])
assert not keypoints_equal([kpsoi], observed_kpsoi)
for kp in observed_kpsoi[0].keypoints:
assert observed_img[int(kp.y), int(kp.x)] > 0
@classmethod
def _test_scale_alignment_between_images_and_poly_or_line_strings(
cls, cba_class, cbaoi_class, augf_name):
img = np.zeros((100, 80), dtype=np.uint8)
img[:, 10-5:10+5] = 255
img[:, 70-5:70+5] = 255
coords = [(10, 10),
(70, 10), (70, 20), (70, 30), (70, 40),
(70, 50), (70, 60), (70, 70), (70, 80),
(70, 90),
(10, 90),
(10, 80), (10, 70), (10, 60), (10, 50),
(10, 40), (10, 30), (10, 20), (10, 10)]
cba = cba_class(coords)
cbaoi = cbaoi_class([cba, cba.shift(left=1, top=1)],
shape=img.shape)
aug = iaa.PiecewiseAffine(scale=0.03, nb_rows=10, nb_cols=10)
aug_det = aug.to_deterministic()
observed_imgs = aug_det.augment_images([img, img])
observed_cbaois = getattr(aug_det, augf_name)([cbaoi, cbaoi])
for observed_img, observed_cbaoi in zip(observed_imgs, observed_cbaois):
assert observed_cbaoi.shape == img.shape
for cba_aug in observed_cbaoi.items:
if hasattr(cba_aug, "is_valid"):
assert cba_aug.is_valid
for point_aug in cba_aug.coords:
x = int(np.round(point_aug[0]))
y = int(np.round(point_aug[1]))
assert observed_img[y, x] > 0
def test_scale_alignment_between_images_and_polygons(self):
self._test_scale_alignment_between_images_and_poly_or_line_strings(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_scale_alignment_between_images_and_line_strings(self):
self._test_scale_alignment_between_images_and_poly_or_line_strings(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_scale_alignment_between_images_and_bounding_boxes(self):
img = np.zeros((100, 80), dtype=np.uint8)
s = 0
img[10-s:10+s+1, 20-s:20+s+1] = 255
img[60-s:60+s+1, 70-s:70+s+1] = 255
bb = ia.BoundingBox(y1=10, x1=20, y2=60, x2=70)
bbsoi = ia.BoundingBoxesOnImage([bb], shape=img.shape)
aug = iaa.PiecewiseAffine(scale=0.03, nb_rows=10, nb_cols=10)
observed_imgs, observed_bbsois = aug(
images=[img], bounding_boxes=[bbsoi])
for observed_img, observed_bbsoi in zip(observed_imgs, observed_bbsois):
assert observed_bbsoi.shape == img.shape
observed_img_x = np.max(observed_img, axis=0)
observed_img_y = np.max(observed_img, axis=1)
nonz_x = np.nonzero(observed_img_x)[0]
nonz_y = np.nonzero(observed_img_y)[0]
img_x1 = min(nonz_x)
img_x2 = max(nonz_x)
img_y1 = min(nonz_y)
img_y2 = max(nonz_y)
expected = ia.BoundingBox(x1=img_x1, y1=img_y1,
x2=img_x2, y2=img_y2)
for bb_aug in observed_bbsoi.bounding_boxes:
# we don't expect perfect IoU here, because the actual
# underlying KP aug used distance maps
# most IoUs seem to end up in the range 0.9-0.95
assert bb_aug.iou(expected) > 0.8
def test_scale_is_list(self):
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug = iaa.PiecewiseAffine(scale=[0.01, 0.10], nb_rows=12, nb_cols=4)
avg1 = np.average([
np.average(
aug1.augment_image(self.image)
* (~self.mask).astype(np.float32)
)
for _ in sm.xrange(3)
])
avg2 = np.average([
np.average(
aug2.augment_image(self.image)
* (~self.mask).astype(np.float32)
)
for _ in sm.xrange(3)
])
seen = [0, 0]
for _ in sm.xrange(15):
observed = aug.augment_image(self.image)
avg = np.average(observed * (~self.mask).astype(np.float32))
diff1 = abs(avg - avg1)
diff2 = abs(avg - avg2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
# -----
# rows and cols
# -----
@classmethod
def _compute_observed_std_ygrad_in_mask(cls, observed, mask):
grad_vert = (
observed[1:, :].astype(np.float32)
- observed[:-1, :].astype(np.float32)
)
grad_vert = grad_vert * (~mask[1:, :]).astype(np.float32)
return np.std(grad_vert)
def _compute_std_ygrad_in_mask(self, aug, image, mask, nb_iterations):
stds = []
for _ in sm.xrange(nb_iterations):
observed = aug.augment_image(image)
stds.append(
self._compute_observed_std_ygrad_in_mask(observed, mask)
)
return np.average(stds)
def test_nb_rows_affects_images(self):
# verify effects of rows
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=30, nb_cols=4)
std1 = self._compute_std_ygrad_in_mask(aug1, self.image, self.mask, 3)
std2 = self._compute_std_ygrad_in_mask(aug2, self.image, self.mask, 3)
assert std1 < std2
def test_nb_rows_is_list_affects_images(self):
# rows as list
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=[4, 20], nb_cols=4)
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=30, nb_cols=4)
std1 = self._compute_std_ygrad_in_mask(aug1, self.image, self.mask, 3)
std2 = self._compute_std_ygrad_in_mask(aug2, self.image, self.mask, 3)
seen = [0, 0]
for _ in sm.xrange(20):
observed = aug.augment_image(self.image)
std = self._compute_observed_std_ygrad_in_mask(observed, self.mask)
diff1 = abs(std - std1)
diff2 = abs(std - std2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
def test_nb_cols_affects_images(self):
# verify effects of cols
image = self.image.T
mask = self.mask.T
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=20, nb_cols=4)
std1 = self._compute_std_ygrad_in_mask(aug1, image, mask, 3)
std2 = self._compute_std_ygrad_in_mask(aug2, image, mask, 3)
assert std1 < std2
def test_nb_cols_is_list_affects_images(self):
# cols as list
image = self.image.T
mask = self.mask.T
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=[4, 20])
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=30)
std1 = self._compute_std_ygrad_in_mask(aug1, image, mask, 3)
std2 = self._compute_std_ygrad_in_mask(aug2, image, mask, 3)
seen = [0, 0]
for _ in sm.xrange(20):
observed = aug.augment_image(image)
std = self._compute_observed_std_ygrad_in_mask(observed, mask)
diff1 = abs(std - std1)
diff2 = abs(std - std2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
# -----
# order
# -----
# TODO
# -----
# cval
# -----
def test_cval_is_zero(self):
# cval as deterministic
img = np.zeros((50, 50, 3), dtype=np.uint8) + 255
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,
mode="constant", cval=0)
observed = aug.augment_image(img)
assert np.sum([observed[:, :] == [0, 0, 0]]) > 0
def test_cval_should_be_ignored_by_heatmaps(self):
# cval as deterministic, heatmaps should always use cval=0
heatmaps = HeatmapsOnImage(
np.zeros((50, 50, 1), dtype=np.float32), shape=(50, 50, 3))
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,
mode="constant", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert np.sum([observed.get_arr()[:, :] >= 0.01]) == 0
def test_cval_should_be_ignored_by_segmaps(self):
# cval as deterministic, segmaps should always use cval=0
segmaps = SegmentationMapsOnImage(
np.zeros((50, 50, 1), dtype=np.int32), shape=(50, 50, 3))
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,
mode="constant", cval=255)
observed = aug.augment_segmentation_maps([segmaps])[0]
assert np.sum([observed.get_arr()[:, :] > 0]) == 0
def test_cval_is_list(self):
# cval as list
img = np.zeros((20, 20), dtype=np.uint8) + 255
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=5, nb_cols=5,
mode="constant", cval=[0, 10])
seen = [0, 0, 0]
for _ in sm.xrange(30):
observed = aug.augment_image(img)
nb_0 = np.sum([observed[:, :] == 0])
nb_10 = np.sum([observed[:, :] == 10])
if nb_0 > 0:
seen[0] += 1
elif nb_10 > 0:
seen[1] += 1
else:
seen[2] += 1
assert seen[0] > 5
assert seen[1] > 5
assert seen[2] <= 4
# -----
# mode
# -----
# TODO
# ---------
# remaining keypoints tests
# ---------
def test_keypoints_outside_of_image(self):
# keypoints outside of image
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
kps = [ia.Keypoint(x=-10, y=-20)]
kpsoi = ia.KeypointsOnImage(kps, shape=(10, 10, 3))
observed = aug.augment_keypoints(kpsoi)
assert_cbaois_equal(observed, kpsoi)
def test_keypoints_empty(self):
# empty keypoints
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
kpsoi = ia.KeypointsOnImage([], shape=(10, 10, 3))
observed = aug.augment_keypoints(kpsoi)
assert_cbaois_equal(observed, kpsoi)
# ---------
# remaining polygons tests
# ---------
def test_polygons_outside_of_image(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)
exterior = [(-10, -10), (110, -10), (110, 90), (-10, 90)]
poly = ia.Polygon(exterior)
psoi = ia.PolygonsOnImage([poly], shape=(10, 10, 3))
observed = aug.augment_polygons(psoi)
assert_cbaois_equal(observed, psoi)
def test_empty_polygons(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
psoi = ia.PolygonsOnImage([], shape=(10, 10, 3))
observed = aug.augment_polygons(psoi)
assert_cbaois_equal(observed, psoi)
# ---------
# remaining line string tests
# ---------
def test_line_strings_outside_of_image(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)
coords = [(-10, -10), (110, -10), (110, 90), (-10, 90)]
ls = ia.LineString(coords)
lsoi = ia.LineStringsOnImage([ls], shape=(10, 10, 3))
observed = aug.augment_line_strings(lsoi)
assert_cbaois_equal(observed, lsoi)
def test_empty_line_strings(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
lsoi = ia.LineStringsOnImage([], shape=(10, 10, 3))
observed = aug.augment_line_strings(lsoi)
assert_cbaois_equal(observed, lsoi)
# ---------
# remaining bounding box tests
# ---------
def test_bounding_boxes_outside_of_image(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)
bbs = ia.BoundingBox(x1=-10, y1=-10, x2=15, y2=15)
bbsoi = ia.BoundingBoxesOnImage([bbs], shape=(10, 10, 3))
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
def test_empty_bounding_boxes(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
bbsoi = ia.BoundingBoxesOnImage([], shape=(10, 10, 3))
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
# ---------
# zero-sized axes
# ---------
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=2, nb_cols=2)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_zero_sized_axes_absolute_scale(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PiecewiseAffine(scale=5, nb_rows=2, nb_cols=2,
absolute_scale=True)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# ---------
# other methods
# ---------
def test_get_parameters(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=10, order=1,
cval=2, mode="constant",
absolute_scale=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert isinstance(params[3], iap.Deterministic)
assert isinstance(params[4], iap.Deterministic)
assert isinstance(params[5], iap.Deterministic)
assert params[6] is False
assert 0.1 - 1e-8 < params[0].value < 0.1 + 1e-8
assert params[1].value == 8
assert params[2].value == 10
assert params[3].value == 1
assert params[4].value == 2
assert params[5].value == "constant"
# ---------
# other dtypes
# ---------
@property
def other_dtypes_mask(self):
mask = np.zeros((21, 21), dtype=bool)
mask[:, 7:13] = True
return mask
def test_other_dtypes_bool(self):
aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,
mode="constant")
image = np.zeros((21, 21), dtype=bool)
image[self.other_dtypes_mask] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert not np.all(image_aug == 1)
assert np.any(image_aug[~self.other_dtypes_mask] == 1)
def test_other_dtypes_uint_int(self):
aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,
mode="constant")
dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value-100, max_value]
values = values + [(-1)*value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value-100, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((21, 21), dtype=dtype)
image[:, 7:13] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert not np.all(image_aug == value)
assert np.any(image_aug[~self.other_dtypes_mask] == value)
def test_other_dtypes_float(self):
aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,
mode="constant")
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((21, 21), dtype=dtype)
image[:, 7:13] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
# TODO switch all other tests from float(...) to
# np.float128(...) pattern, seems to be more accurate
# for 128bit floats
assert not np.all(_isclose(image_aug, np.float128(value)))
assert np.any(_isclose(image_aug[~self.other_dtypes_mask],
np.float128(value)))
class TestPerspectiveTransform(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
img = np.zeros((30, 30), dtype=np.uint8)
img[10:20, 10:20] = 255
return img
@property
def heatmaps(self):
return HeatmapsOnImage((self.image / 255.0).astype(np.float32),
shape=self.image.shape)
@property
def segmaps(self):
return SegmentationMapsOnImage((self.image > 0).astype(np.int32),
shape=self.image.shape)
# --------
# __init__
# --------
def test___init___scale_is_tuple(self):
# tuple for scale
aug = iaa.PerspectiveTransform(scale=(0.1, 0.2))
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.1 - 1e-8 < aug.jitter.scale.a.value < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.b.value < 0.2 + 1e-8
def test___init___scale_is_list(self):
# list for scale
aug = iaa.PerspectiveTransform(scale=[0.1, 0.2, 0.3])
assert isinstance(aug.jitter.scale, iap.Choice)
assert len(aug.jitter.scale.a) == 3
assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8
assert 0.3 - 1e-8 < aug.jitter.scale.a[2] < 0.3 + 1e-8
def test___init___scale_is_stochastic_parameter(self):
# StochasticParameter for scale
aug = iaa.PerspectiveTransform(scale=iap.Choice([0.1, 0.2, 0.3]))
assert isinstance(aug.jitter.scale, iap.Choice)
assert len(aug.jitter.scale.a) == 3
assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8
assert 0.3 - 1e-8 < aug.jitter.scale.a[2] < 0.3 + 1e-8
def test___init___bad_datatype_for_scale_leads_to_failure(self):
# bad datatype for scale
got_exception = False
try:
_ = iaa.PerspectiveTransform(scale=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___mode_is_all(self):
aug = iaa.PerspectiveTransform(cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
def test___init___mode_is_string(self):
aug = iaa.PerspectiveTransform(cval=0, mode="replicate")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "replicate"
def test___init___mode_is_list(self):
aug = iaa.PerspectiveTransform(cval=0, mode=["replicate", "constant"])
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "constant" in aug.mode.a)
def test___init___mode_is_stochastic_parameter(self):
aug = iaa.PerspectiveTransform(
cval=0, mode=iap.Choice(["replicate", "constant"]))
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "constant" in aug.mode.a)
# --------
# image, heatmaps, segmaps
# --------
def test_image_without_keep_size(self):
# without keep_size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_image(self.image)
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
expected = self.image[y1:y2, x1:x2]
assert all([
abs(s1-s2) <= 1 for s1, s2 in zip(observed.shape, expected.shape)
])
if observed.shape != expected.shape:
observed = ia.imresize_single_image(
observed, expected.shape[0:2], interpolation="cubic")
# differences seem to mainly appear around the border of the inner
# rectangle, possibly due to interpolation
assert np.average(
np.abs(observed.astype(np.int32) - expected.astype(np.int32))
) < 30.0
def test_image_heatmaps_alignment_without_keep_size(self):
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
hm = HeatmapsOnImage(
self.image.astype(np.float32)/255.0,
shape=(30, 30)
)
observed = aug.augment_image(self.image)
hm_aug = aug.augment_heatmaps([hm])[0]
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
expected = (y2 - y1, x2 - x1)
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(hm_aug.shape, expected)
])
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(hm_aug.arr_0to1.shape, expected + (1,))
])
img_aug_mask = observed > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.99
def test_image_segmaps_alignment_without_keep_size(self):
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
segmaps = SegmentationMapsOnImage(
(self.image > 100).astype(np.int32),
shape=(30, 30)
)
observed = aug.augment_image(self.image)
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
expected = (y2 - y1, x2 - x1)
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(segmaps_aug.shape, expected)
])
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(segmaps_aug.arr.shape, expected + (1,))
])
img_aug_mask = observed > 255*0.5
segmaps_aug_mask = segmaps_aug.arr > 0
same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.99
def test_heatmaps_smaller_than_image_without_keep_size(self):
# without keep_size, different heatmap size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
x1_small = int(25*0.2)
x2_small = int(25*0.8)
y1_small = int(20*0.2)
y2_small = int(20*0.8)
img_small = ia.imresize_single_image(
self.image,
(20, 25),
interpolation="cubic")
hm = ia.HeatmapsOnImage(
img_small.astype(np.float32)/255.0,
shape=(30, 30))
img_aug = aug.augment_image(self.image)
hm_aug = aug.augment_heatmaps([hm])[0]
expected = (y2 - y1, x2 - x1)
expected_small = (y2_small - y1_small, x2_small - x1_small, 1)
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(hm_aug.shape, expected)
])
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(hm_aug.arr_0to1.shape, expected_small)
])
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, img_aug.shape[0:2], interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.96
def test_segmaps_smaller_than_image_without_keep_size(self):
# without keep_size, different segmap size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
x1_small = int(25*0.2)
x2_small = int(25*0.8)
y1_small = int(20*0.2)
y2_small = int(20*0.8)
img_small = ia.imresize_single_image(
self.image,
(20, 25),
interpolation="cubic")
seg = SegmentationMapsOnImage(
(img_small > 100).astype(np.int32),
shape=(30, 30))
img_aug = aug.augment_image(self.image)
seg_aug = aug.augment_segmentation_maps([seg])[0]
expected = (y2 - y1, x2 - x1)
expected_small = (y2_small - y1_small, x2_small - x1_small, 1)
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(seg_aug.shape, expected)
])
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(seg_aug.arr.shape, expected_small)
])
img_aug_mask = img_aug > 255*0.5
seg_aug_mask = ia.imresize_single_image(
seg_aug.arr, img_aug.shape[0:2], interpolation="nearest") > 0
same = np.sum(img_aug_mask == seg_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.92
def test_image_with_keep_size(self):
# with keep_size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_image(self.image)
expected = self.image[int(30*0.2):int(30*0.8),
int(30*0.2):int(30*0.8)]
expected = ia.imresize_single_image(
expected,
self.image.shape[0:2],
interpolation="cubic")
assert observed.shape == self.image.shape
# differences seem to mainly appear around the border of the inner
# rectangle, possibly due to interpolation
assert np.average(
np.abs(observed.astype(np.int32) - expected.astype(np.int32))
) < 30.0
def test_heatmaps_with_keep_size(self):
# with keep_size, heatmaps
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_heatmaps([self.heatmaps])[0]
heatmaps_arr = self.heatmaps.get_arr()
expected = heatmaps_arr[int(30*0.2):int(30*0.8),
int(30*0.2):int(30*0.8)]
expected = ia.imresize_single_image(
(expected*255).astype(np.uint8),
self.image.shape[0:2],
interpolation="cubic")
expected = (expected / 255.0).astype(np.float32)
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
# differences seem to mainly appear around the border of the inner
# rectangle, possibly due to interpolation
assert np.average(np.abs(observed.get_arr() - expected)) < 30.0
def test_segmaps_with_keep_size(self):
# with keep_size, segmaps
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
segmaps_arr = self.segmaps.get_arr()
expected = segmaps_arr[int(30*0.2):int(30*0.8),
int(30*0.2):int(30*0.8)]
expected = ia.imresize_single_image(
(expected*255).astype(np.uint8),
self.image.shape[0:2],
interpolation="cubic")
expected = (expected > 255*0.5).astype(np.int32)
assert observed.shape == self.segmaps.shape
assert np.average(observed.get_arr() != expected) < 0.05
def test_image_rgb_with_keep_size(self):
# with keep_size, RGB images
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
imgs = np.tile(self.image[np.newaxis, :, :, np.newaxis], (2, 1, 1, 3))
observed = aug.augment_images(imgs)
for img_idx in sm.xrange(2):
for c in sm.xrange(3):
observed_i = observed[img_idx, :, :, c]
expected = imgs[img_idx,
int(30*0.2):int(30*0.8),
int(30*0.2):int(30*0.8),
c]
expected = ia.imresize_single_image(
expected, imgs.shape[1:3], interpolation="cubic")
assert observed_i.shape == imgs.shape[1:3]
# differences seem to mainly appear around the border of the
# inner rectangle, possibly due to interpolation
assert np.average(
np.abs(
observed_i.astype(np.int32) - expected.astype(np.int32)
)
) < 30.0
# --------
# keypoints
# --------
def test_keypoints_without_keep_size(self):
# keypoint augmentation without keep_size
# TODO deviations of around 0.4-0.7 in this and the next test (between
# expected and observed coordinates) -- why?
kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]
kpsoi = ia.KeypointsOnImage(kps, shape=self.image.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_keypoints([kpsoi])
kps_expected = [
ia.Keypoint(x=10-0.2*30, y=10-0.2*30),
ia.Keypoint(x=14-0.2*30, y=11-0.2*30)
]
gen = zip(observed[0].keypoints, kps_expected)
# TODO deviations of around 0.5 here from expected values, why?
for kp_observed, kp_expected in gen:
assert kp_observed.coords_almost_equals(
kp_expected, max_distance=1.5)
def test_keypoints_with_keep_size(self):
# keypoint augmentation with keep_size
kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]
kpsoi = ia.KeypointsOnImage(kps, shape=self.image.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_keypoints([kpsoi])
kps_expected = [
ia.Keypoint(x=((10-0.2*30)/(30*0.6))*30,
y=((10-0.2*30)/(30*0.6))*30),
ia.Keypoint(x=((14-0.2*30)/(30*0.6))*30,
y=((11-0.2*30)/(30*0.6))*30)
]
gen = zip(observed[0].keypoints, kps_expected)
# TODO deviations of around 0.5 here from expected values, why?
for kp_observed, kp_expected in gen:
assert kp_observed.coords_almost_equals(
kp_expected, max_distance=1.5)
def test_image_keypoint_alignment(self):
img = np.zeros((100, 100), dtype=np.uint8)
img[25-3:25+3, 25-3:25+3] = 255
img[50-3:50+3, 25-3:25+3] = 255
img[75-3:75+3, 25-3:25+3] = 255
img[25-3:25+3, 75-3:75+3] = 255
img[50-3:50+3, 75-3:75+3] = 255
img[75-3:75+3, 75-3:75+3] = 255
img[50-3:75+3, 50-3:75+3] = 255
kps = [
ia.Keypoint(y=25, x=25), ia.Keypoint(y=50, x=25),
ia.Keypoint(y=75, x=25), ia.Keypoint(y=25, x=75),
ia.Keypoint(y=50, x=75), ia.Keypoint(y=75, x=75),
ia.Keypoint(y=50, x=50)
]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
aug = iaa.PerspectiveTransform(scale=(0.05, 0.15), keep_size=True)
for _ in sm.xrange(10):
aug_det = aug.to_deterministic()
imgs_aug = aug_det.augment_images([img, img])
kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])
for img_aug, kpsoi_aug in zip(imgs_aug, kpsois_aug):
assert kpsoi_aug.shape == img.shape
for kp_aug in kpsoi_aug.keypoints:
x, y = int(np.round(kp_aug.x)), int(np.round(kp_aug.y))
if 0 <= x < img.shape[1] and 0 <= y < img.shape[0]:
assert img_aug[y, x] > 10
def test_empty_keypoints(self):
# test empty keypoints
kpsoi = ia.KeypointsOnImage([], shape=(20, 10, 3))
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
observed = aug.augment_keypoints(kpsoi)
assert_cbaois_equal(observed, kpsoi)
# --------
# abstract test methods for polygons and line strings
# --------
@classmethod
def _test_cbaois_without_keep_size(cls, cba_class, cbaoi_class, augf_name):
points = np.float32([
[10, 10],
[25, 10],
[25, 25],
[10, 25]
])
cbaoi = cbaoi_class([cba_class(points)], shape=(30, 30, 3))
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = getattr(aug, augf_name)(cbaoi)
assert observed.shape == (30 - 12, 30 - 12, 3)
assert len(observed.items) == 1
if hasattr(observed.items[0], "is_valid"):
assert observed.items[0].is_valid
points_expected = np.copy(points)
points_expected[:, 0] -= 0.2 * 30
points_expected[:, 1] -= 0.2 * 30
# TODO deviations of around 0.5 here from expected values, why?
assert observed.items[0].coords_almost_equals(
points_expected, max_distance=1.5)
@classmethod
def _test_cbaois_with_keep_size(cls, cba_class, cbaoi_class, augf_name):
# polygon augmentation with keep_size
points = np.float32([
[10, 10],
[25, 10],
[25, 25],
[10, 25]
])
cbaoi = cbaoi_class([cba_class(points)], shape=(30, 30, 3))
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = getattr(aug, augf_name)(cbaoi)
assert observed.shape == (30, 30, 3)
assert len(observed.items) == 1
if hasattr(observed.items[0], "is_valid"):
assert observed.items[0].is_valid
points_expected = np.copy(points)
points_expected[:, 0] = (
(points_expected[:, 0] - 0.2 * 30) / (30 * 0.6)
) * 30
points_expected[:, 1] = (
(points_expected[:, 1] - 0.2 * 30) / (30 * 0.6)
) * 30
# TODO deviations of around 0.5 here from expected values, why?
assert observed.items[0].coords_almost_equals(
points_expected, max_distance=2.5)
@classmethod
def _test_image_cba_alignment(cls, cba_class, cbaoi_class, augf_name):
img = np.zeros((100, 100), dtype=np.uint8)
img[25-3:25+3, 25-3:25+3] = 255
img[50-3:50+3, 25-3:25+3] = 255
img[75-3:75+3, 25-3:25+3] = 255
img[25-3:25+3, 75-3:75+3] = 255
img[50-3:50+3, 75-3:75+3] = 255
img[75-3:75+3, 75-3:75+3] = 255
points = [
[25, 25],
[75, 25],
[75, 50],
[75, 75],
[25, 75],
[25, 50]
]
cbaoi = cbaoi_class([cba_class(points)], shape=img.shape)
aug = iaa.PerspectiveTransform(scale=0.1, keep_size=True)
for _ in sm.xrange(10):
aug_det = aug.to_deterministic()
imgs_aug = aug_det.augment_images([img] * 4)
cbaois_aug = getattr(aug_det, augf_name)([cbaoi] * 4)
for img_aug, cbaoi_aug in zip(imgs_aug, cbaois_aug):
assert cbaoi_aug.shape == img.shape
for cba_aug in cbaoi_aug.items:
if hasattr(cba_aug, "is_valid"):
assert cba_aug.is_valid
for x, y in cba_aug.coords:
if 0 <= x < img.shape[1] and 0 <= y < img.shape[0]:
bb = ia.BoundingBox(x1=x-2, x2=x+2, y1=y-2, y2=y+2)
img_ex = bb.extract_from_image(img_aug)
assert np.any(img_ex > 10)
@classmethod
def _test_empty_cba(cls, cbaoi, augf_name):
# test empty polygons
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi)
# --------
# polygons
# --------
def test_polygons_without_keep_size(self):
self._test_cbaois_without_keep_size(ia.Polygon, ia.PolygonsOnImage,
"augment_polygons")
def test_polygons_with_keep_size(self):
self._test_cbaois_with_keep_size(ia.Polygon, ia.PolygonsOnImage,
"augment_polygons")
def test_image_polygon_alignment(self):
self._test_image_cba_alignment(ia.Polygon, ia.PolygonsOnImage,
"augment_polygons")
def test_empty_polygons(self):
psoi = ia.PolygonsOnImage([], shape=(20, 10, 3))
self._test_empty_cba(psoi, "augment_polygons")
def test_polygons_under_extreme_scale_values(self):
# test extreme scales
# TODO when setting .min_height and .min_width in PerspectiveTransform
# to 1x1, at least one of the output polygons was invalid and had
# only 3 instead of the expected 4 points - why?
for scale in [0.1, 0.2, 0.3, 0.4]:
with self.subTest(scale=scale):
exterior = np.float32([
[10, 10],
[25, 10],
[25, 25],
[10, 25]
])
psoi = ia.PolygonsOnImage([ia.Polygon(exterior)],
shape=(30, 30, 3))
aug = iaa.PerspectiveTransform(scale=scale, keep_size=True)
aug.jitter = iap.Deterministic(scale)
observed = aug.augment_polygons(psoi)
assert observed.shape == (30, 30, 3)
assert len(observed.polygons) == 1
assert observed.polygons[0].is_valid
# FIXME this part is currently deactivated due to too large
# deviations from expectations. As the alignment check
# works, this is probably some error on the test side
"""
exterior_expected = np.copy(exterior)
exterior_expected[:, 0] = (
(exterior_expected[:, 0] - scale * 30) / (30*(1-2*scale))
) * 30
exterior_expected[:, 1] = (
(exterior_expected[:, 1] - scale * 30) / (30*(1-2*scale))
) * 30
poly0 = observed.polygons[0]
# TODO deviations of around 0.5 here from expected values, why?
assert poly0.exterior_almost_equals(
exterior_expected, max_distance=2.0)
"""
# --------
# line strings
# --------
def test_line_strings_without_keep_size(self):
self._test_cbaois_without_keep_size(ia.LineString, ia.LineStringsOnImage,
"augment_line_strings")
def test_line_strings_with_keep_size(self):
self._test_cbaois_with_keep_size(ia.LineString, ia.LineStringsOnImage,
"augment_line_strings")
def test_image_line_string_alignment(self):
self._test_image_cba_alignment(ia.LineString, ia.LineStringsOnImage,
"augment_line_strings")
def test_empty_line_strings(self):
lsoi = ia.LineStringsOnImage([], shape=(20, 10, 3))
self._test_empty_cba(lsoi, "augment_line_strings")
# --------
# bounding boxes
# --------
def test_bounding_boxes_without_keep_size(self):
# BB augmentation without keep_size
# TODO deviations of around 0.4-0.7 in this and the next test (between
# expected and observed coordinates) -- why?
bbs = [ia.BoundingBox(x1=0, y1=10, x2=20, y2=20)]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_bounding_boxes([bbsoi])
bbs_expected = [
ia.BoundingBox(x1=0-0.2*30, y1=10-0.2*30,
x2=20-0.2*30, y2=20-0.2*30)
]
gen = zip(observed[0].bounding_boxes, bbs_expected)
# TODO deviations of around 0.5 here from expected values, why?
for bb_observed, bb_expected in gen:
assert bb_observed.coords_almost_equals(
bb_expected, max_distance=1.5)
def test_bounding_boxes_with_keep_size(self):
# BB augmentation with keep_size
bbs = [ia.BoundingBox(x1=0, y1=10, x2=20, y2=20)]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_bounding_boxes([bbsoi])
bbs_expected = [
ia.BoundingBox(
x1=((0-0.2*30)/(30*0.6))*30,
y1=((10-0.2*30)/(30*0.6))*30,
x2=((20-0.2*30)/(30*0.6))*30,
y2=((20-0.2*30)/(30*0.6))*30
)
]
gen = zip(observed[0].bounding_boxes, bbs_expected)
# TODO deviations of around 0.5 here from expected values, why?
for bb_observed, bb_expected in gen:
assert bb_observed.coords_almost_equals(
bb_expected, max_distance=1.5)
def test_image_bounding_box_alignment(self):
img = np.zeros((100, 100), dtype=np.uint8)
img[35:35+1, 35:65+1] = 255
img[65:65+1, 35:65+1] = 255
img[35:65+1, 35:35+1] = 255
img[35:65+1, 65:65+1] = 255
bbs = [
ia.BoundingBox(y1=35.5, x1=35.5, y2=65.5, x2=65.5),
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=img.shape)
aug = iaa.PerspectiveTransform(scale=(0.05, 0.2), keep_size=True)
for _ in sm.xrange(10):
imgs_aug, bbsois_aug = aug(
images=[img, img, img, img],
bounding_boxes=[bbsoi, bbsoi, bbsoi, bbsoi])
nb_skipped = 0
for img_aug, bbsoi_aug in zip(imgs_aug, bbsois_aug):
assert bbsoi_aug.shape == img_aug.shape
for bb_aug in bbsoi_aug.bounding_boxes:
if bb_aug.is_fully_within_image(img_aug):
# top, bottom, left, right
x1 = bb_aug.x1_int
x2 = bb_aug.x2_int
y1 = bb_aug.y1_int
y2 = bb_aug.y2_int
top_row = img_aug[y1-1:y1+1, x1-1:x2+1]
btm_row = img_aug[y2-1:y2+1, x1-1:x2+1]
lft_row = img_aug[y1-1:y2+1, x1-1:x1+1]
rgt_row = img_aug[y1-1:y2+1, x2-1:x2+1]
assert np.max(top_row) > 10
assert np.max(btm_row) > 10
assert np.max(lft_row) > 10
assert np.max(rgt_row) > 10
else:
nb_skipped += 1
assert nb_skipped <= 2
def test_empty_bounding_boxes(self):
# test empty bounding boxes
bbsoi = ia.BoundingBoxesOnImage([], shape=(20, 10, 3))
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
# ------------
# mode
# ------------
def test_draw_samples_with_mode_being_int(self):
aug = iaa.PerspectiveTransform(scale=0.001, mode=cv2.BORDER_REPLICATE)
samples = aug._draw_samples([(10, 10, 3)], iarandom.RNG(0))
assert samples.modes.shape == (1,)
assert samples.modes[0] == cv2.BORDER_REPLICATE
def test_draw_samples_with_mode_being_string(self):
aug = iaa.PerspectiveTransform(scale=0.001, mode="replicate")
samples = aug._draw_samples([(10, 10, 3)], iarandom.RNG(0))
assert samples.modes.shape == (1,)
assert samples.modes[0] == cv2.BORDER_REPLICATE
def test_mode_replicate_copies_values(self):
aug = iaa.PerspectiveTransform(
scale=0.001, mode="replicate", cval=0, random_state=31)
img = np.ones((256, 256, 3), dtype=np.uint8) * 255
img_aug = aug.augment_image(img)
assert (img_aug == 255).all()
def test_mode_constant_uses_cval(self):
aug255 = iaa.PerspectiveTransform(
scale=0.001, mode="constant", cval=255, random_state=31)
aug0 = iaa.PerspectiveTransform(
scale=0.001, mode="constant", cval=0, random_state=31)
img = np.ones((256, 256, 3), dtype=np.uint8) * 255
img_aug255 = aug255.augment_image(img)
img_aug0 = aug0.augment_image(img)
assert (img_aug255 == 255).all()
assert not (img_aug0 == 255).all()
# ---------
# fit_output
# ---------
def test_fit_output_with_fixed_jitter(self):
aug = iaa.PerspectiveTransform(scale=0.2, fit_output=True,
keep_size=False)
aug.jitter = iap.Deterministic(0.2)
image = np.zeros((40, 40, 3), dtype=np.uint8)
image[0:3, 0:3, 0] = 255
image[0:3, 40-3:, 1] = 255
image[40-3:, 40-3:, 2] = 255
image_aug = aug(image=image)
h, w = image_aug.shape[0:2]
y0 = np.argmax(image_aug[:, 0, 0])
x0 = np.argmax(image_aug[0, :, 0])
y1 = np.argmax(image_aug[:, w-1, 1])
x1 = np.argmax(image_aug[0, :, 1])
y2 = np.argmax(image_aug[:, w-1, 2])
x2 = np.argmax(image_aug[h-1, :, 2])
# different shape
assert image_aug.shape != image.shape
# corners roughly still at top-left, top-right, bottom-right
assert 0 <= y0 <= 3
assert 0 <= x0 <= 3
assert 0 <= y1 <= 3
assert image_aug.shape[1]-3 <= x1 <= image_aug.shape[1]
assert image_aug.shape[1]-3 <= y2 <= image_aug.shape[1]
assert image_aug.shape[1]-3 <= x2 <= image_aug.shape[1]
# no corner pixels now in the center
assert np.max(image_aug[8:h-8, 8:w-8, :]) == 0
def test_fit_output_with_random_jitter(self):
aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,
keep_size=False)
image = np.zeros((50, 50, 4), dtype=np.uint8)
image[0:5, 0:5, 0] = 255
image[0:5, 50-5:, 1] = 255
image[50-5:, 50-5:, 2] = 255
image[50-5:, 0:5, 3] = 255
for _ in sm.xrange(10):
image_aug = aug(image=image)
h, w = image_aug.shape[0:2]
arr_nochan = np.max(image_aug, axis=2)
y_idx = np.where(np.max(arr_nochan, axis=1))[0]
x_idx = np.where(np.max(arr_nochan, axis=0))[0]
y_min = np.min(y_idx)
y_max = np.max(y_idx)
x_min = np.min(x_idx)
x_max = np.max(x_idx)
tol = 0
assert 0 <= y_min <= 5+tol
assert 0 <= x_min <= 5+tol
assert h-5-tol <= y_max <= h-1
assert w-5-tol <= x_max <= w-1
def test_fit_output_with_random_jitter__segmentation_maps(self):
aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,
keep_size=False)
arr = np.zeros((50, 50, 4), dtype=np.uint8)
arr[0:5, 0:5, 0] = 1
arr[0:5, 50-5:, 1] = 1
arr[50-5:, 50-5:, 2] = 1
arr[50-5:, 0:5, 3] = 1
segmap = ia.SegmentationMapsOnImage(arr, shape=(50, 50, 3))
image = np.zeros((49, 49, 3), dtype=np.uint8)
image = ia.pad(image, top=1, right=1, bottom=1, left=1, cval=128)
for _ in sm.xrange(10):
segmap_aug, image_aug = aug(segmentation_maps=segmap, image=image)
h, w = segmap_aug.arr.shape[0:2]
arr_nochan = np.max(segmap_aug.arr, axis=2)
y_idx = np.where(np.max(arr_nochan, axis=1))[0]
x_idx = np.where(np.max(arr_nochan, axis=0))[0]
y_min = np.min(y_idx)
y_max = np.max(y_idx)
x_min = np.min(x_idx)
x_max = np.max(x_idx)
tol = 0
assert 0 <= y_min <= 5+tol
assert 0 <= x_min <= 5+tol
assert h-5-tol <= y_max <= h-1
assert w-5-tol <= x_max <= w-1
def test_fit_output_with_fixed_jitter__keypoints(self):
aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,
keep_size=False)
kpsoi = ia.KeypointsOnImage.from_xy_array([
(0, 0),
(50, 0),
(50, 50),
(0, 50)
], shape=(50, 50, 3))
for _ in sm.xrange(10):
kpsoi_aug = aug(keypoints=kpsoi)
h, w = kpsoi_aug.shape[0:2]
y0, x0 = kpsoi_aug.keypoints[0].y, kpsoi_aug.keypoints[0].x
y1, x1 = kpsoi_aug.keypoints[1].y, kpsoi_aug.keypoints[1].x
y2, x2 = kpsoi_aug.keypoints[2].y, kpsoi_aug.keypoints[2].x
y3, x3 = kpsoi_aug.keypoints[3].y, kpsoi_aug.keypoints[3].x
y_min = min([y0, y1, y2, y3])
y_max = max([y0, y1, y2, y3])
x_min = min([x0, x1, x2, x3])
x_max = max([x0, x1, x2, x3])
tol = 0.5
assert 0-tol <= y_min <= tol
assert 0-tol <= x_min <= tol
assert h-tol <= y_max <= h+tol
assert w-tol <= x_max <= w+tol
# ---------
# unusual channel numbers
# ---------
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PerspectiveTransform(scale=0.01)
image_aug = aug(image=image)
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# ---------
# zero-sized axes
# ---------
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
for keep_size in [False, True]:
with self.subTest(shape=shape, keep_size=keep_size):
for _ in sm.xrange(3):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PerspectiveTransform(scale=0.01)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# --------
# get_parameters
# --------
def test_get_parameters(self):
aug = iaa.PerspectiveTransform(scale=0.1, keep_size=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Normal)
assert isinstance(params[0].scale, iap.Deterministic)
assert 0.1 - 1e-8 < params[0].scale.value < 0.1 + 1e-8
assert params[1] is False
assert params[2].value == 0
assert params[3].value == "constant"
assert params[4] is False
# --------
# other dtypes
# --------
def test_other_dtypes_bool(self):
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
y1 = int(30 * 0.2)
y2 = int(30 * 0.8)
x1 = int(30 * 0.2)
x2 = int(30 * 0.8)
image = np.zeros((30, 30), dtype=bool)
image[12:18, :] = True
image[:, 12:18] = True
expected = image[y1:y2, x1:x2]
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert image_aug.shape == expected.shape
assert (np.sum(image_aug == expected) / expected.size) > 0.9
def test_other_dtypes_uint_int(self):
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
y1 = int(30 * 0.2)
y2 = int(30 * 0.8)
x1 = int(30 * 0.2)
x2 = int(30 * 0.8)
dtypes = ["uint8", "uint16", "int8", "int16"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [0, 1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value-100, max_value]
values = values + [(-1)*value for value in values]
else:
values = [0, 1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value-100, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((30, 30), dtype=dtype)
image[12:18, :] = value
image[:, 12:18] = value
expected = image[y1:y2, x1:x2]
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == expected.shape
# rather high tolerance of 0.7 here because of
# interpolation
assert (
np.sum(image_aug == expected) / expected.size
) > 0.7
def test_other_dtypes_float(self):
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
y1 = int(30 * 0.2)
y2 = int(30 * 0.8)
x1 = int(30 * 0.2)
x2 = int(30 * 0.8)
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((30, 30), dtype=dtype)
image[12:18, :] = value
image[:, 12:18] = value
expected = image[y1:y2, x1:x2]
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == expected.shape
# rather high tolerance of 0.7 here because of
# interpolation
assert (
np.sum(_isclose(image_aug, expected)) / expected.size
) > 0.7
class _elastic_trans_temp_thresholds(object):
def __init__(self, alpha, sigma):
self.alpha = alpha
self.sigma = sigma
self.old_alpha = None
self.old_sigma = None
def __enter__(self):
self.old_alpha = iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH
self.old_sigma = iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH
iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = self.alpha
iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = self.sigma
def __exit__(self, exc_type, exc_val, exc_tb):
iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = self.old_alpha
iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = self.old_sigma
# TODO add tests for order
# TODO improve tests for cval
# TODO add tests for mode
class TestElasticTransformation(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
img = np.zeros((50, 50), dtype=np.uint8) + 255
img = np.pad(img, ((100, 100), (100, 100)), mode="constant",
constant_values=0)
return img
@property
def mask(self):
img = self.image
mask = img > 0
return mask
@property
def heatmaps(self):
img = self.image
return HeatmapsOnImage(img.astype(np.float32) / 255.0,
shape=img.shape)
@property
def segmaps(self):
img = self.image
return SegmentationMapsOnImage((img > 0).astype(np.int32),
shape=img.shape)
# -----------
# __init__
# -----------
def test___init___bad_datatype_for_alpha_leads_to_failure(self):
# test alpha having bad datatype
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=False, sigma=0.25)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___alpha_is_tuple(self):
# test alpha being tuple
aug = iaa.ElasticTransformation(alpha=(1.0, 2.0), sigma=0.25)
assert isinstance(aug.alpha, iap.Uniform)
assert isinstance(aug.alpha.a, iap.Deterministic)
assert isinstance(aug.alpha.b, iap.Deterministic)
assert 1.0 - 1e-8 < aug.alpha.a.value < 1.0 + 1e-8
assert 2.0 - 1e-8 < aug.alpha.b.value < 2.0 + 1e-8
def test___init___sigma_is_tuple(self):
# test sigma being tuple
aug = iaa.ElasticTransformation(alpha=0.25, sigma=(1.0, 2.0))
assert isinstance(aug.sigma, iap.Uniform)
assert isinstance(aug.sigma.a, iap.Deterministic)
assert isinstance(aug.sigma.b, iap.Deterministic)
assert 1.0 - 1e-8 < aug.sigma.a.value < 1.0 + 1e-8
assert 2.0 - 1e-8 < aug.sigma.b.value < 2.0 + 1e-8
def test___init___bad_datatype_for_sigma_leads_to_failure(self):
# test sigma having bad datatype
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___order_is_all(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=ia.ALL)
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2, 3, 4, 5]])
def test___init___order_is_int(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=1)
assert isinstance(aug.order, iap.Deterministic)
assert aug.order.value == 1
def test___init___order_is_list(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=[0, 1, 2])
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2]])
def test___init___order_is_stochastic_parameter(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,
order=iap.Choice([0, 1, 2, 3]))
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2, 3]])
def test___init___bad_datatype_for_order_leads_to_failure(self):
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___cval_is_all(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=ia.ALL)
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
def test___init___cval_is_int(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=128)
assert isinstance(aug.cval, iap.Deterministic)
assert aug.cval.value == 128
def test___init___cval_is_list(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,
cval=[16, 32, 64])
assert isinstance(aug.cval, iap.Choice)
assert all([cval in aug.cval.a for cval in [16, 32, 64]])
def test___init___cval_is_stochastic_parameter(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,
cval=iap.Choice([16, 32, 64]))
assert isinstance(aug.cval, iap.Choice)
assert all([cval in aug.cval.a for cval in [16, 32, 64]])
def test___init___cval_is_tuple(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=(128, 255))
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 128
assert aug.cval.b.value == 255
def test___init___bad_datatype_for_cval_leads_to_failure(self):
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___mode_is_all(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
assert all([
mode in aug.mode.a
for mode
in ["constant", "nearest", "reflect", "wrap"]])
def test___init___mode_is_string(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode="nearest")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "nearest"
def test___init___mode_is_list(self):
aug = iaa.ElasticTransformation(
alpha=0.25, sigma=1.0, mode=["constant", "nearest"])
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest"]])
def test___init___mode_is_stochastic_parameter(self):
aug = iaa.ElasticTransformation(
alpha=0.25, sigma=1.0, mode=iap.Choice(["constant", "nearest"]))
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest"]])
def test___init___bad_datatype_for_mode_leads_to_failure(self):
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----------
# alpha, sigma
# -----------
def test_images(self):
# test basic funtionality
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
observed = aug.augment_image(self.image)
mask = self.mask
# assume that some white/255 pixels have been moved away from the
# center and replaced by black/0 pixels
assert np.sum(observed[mask]) < np.sum(self.image[mask])
# assume that some black/0 pixels have been moved away from the outer
# area and replaced by white/255 pixels
assert np.sum(observed[~mask]) > np.sum(self.image[~mask])
def test_images_nonsquare(self):
# test basic funtionality with non-square images
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
img_nonsquare = np.zeros((50, 100), dtype=np.uint8) + 255
img_nonsquare = np.pad(img_nonsquare, ((100, 100), (100, 100)),
mode="constant", constant_values=0)
mask_nonsquare = (img_nonsquare > 0)
observed = aug.augment_image(img_nonsquare)
assert (
np.sum(observed[mask_nonsquare])
< np.sum(img_nonsquare[mask_nonsquare]))
assert (
np.sum(observed[~mask_nonsquare])
> np.sum(img_nonsquare[~mask_nonsquare]))
def test_images_unusual_channel_numbers(self):
# test unusual channels numbers
aug = iaa.ElasticTransformation(alpha=5, sigma=0.5)
for nb_channels in [1, 2, 4, 5, 7, 10, 11]:
img_c = np.tile(self.image[..., np.newaxis], (1, 1, nb_channels))
assert img_c.shape == (250, 250, nb_channels)
observed = aug.augment_image(img_c)
assert observed.shape == (250, 250, nb_channels)
for c in sm.xrange(1, nb_channels):
assert np.array_equal(observed[..., c], observed[..., 0])
def test_heatmaps(self):
# test basic funtionality, heatmaps
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
observed = aug.augment_heatmaps([self.heatmaps])[0]
mask = self.mask
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
assert (
np.sum(observed.get_arr()[mask])
< np.sum(self.heatmaps.get_arr()[mask]))
assert (
np.sum(observed.get_arr()[~mask])
> np.sum(self.heatmaps.get_arr()[~mask]))
def test_segmaps(self):
# test basic funtionality, segmaps
# alpha=1.5 instead of 0.5 as above here, because otherwise nothing
# is moved
aug = iaa.ElasticTransformation(alpha=1.5, sigma=0.25)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
mask = self.mask
assert observed.shape == self.segmaps.shape
assert (
np.sum(observed.get_arr()[mask])
< np.sum(self.segmaps.get_arr()[mask]))
assert (
np.sum(observed.get_arr()[~mask])
> np.sum(self.segmaps.get_arr()[~mask]))
def test_images_weak_vs_strong_alpha(self):
# test effects of increased alpha strength
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_image(self.image)
observed2 = aug2.augment_image(self.image)
mask = self.mask
# assume that the inner area has become more black-ish when using high
# alphas (more white pixels were moved out of the inner area)
assert np.sum(observed1[mask]) > np.sum(observed2[mask])
# assume that the outer area has become more white-ish when using high
# alphas (more black pixels were moved into the inner area)
assert np.sum(observed1[~mask]) < np.sum(observed2[~mask])
def test_heatmaps_weak_vs_strong_alpha(self):
# test effects of increased alpha strength, heatmaps
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_heatmaps([self.heatmaps])[0]
observed2 = aug2.augment_heatmaps([self.heatmaps])[0]
mask = self.mask
assert observed1.shape == self.heatmaps.shape
assert observed2.shape == self.heatmaps.shape
_assert_same_min_max(observed1, self.heatmaps)
_assert_same_min_max(observed2, self.heatmaps)
assert (
np.sum(observed1.get_arr()[mask])
> np.sum(observed2.get_arr()[mask]))
assert (
np.sum(observed1.get_arr()[~mask])
< np.sum(observed2.get_arr()[~mask]))
def test_segmaps_weak_vs_strong_alpha(self):
# test effects of increased alpha strength, segmaps
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_segmentation_maps([self.segmaps])[0]
observed2 = aug2.augment_segmentation_maps([self.segmaps])[0]
mask = self.mask
assert observed1.shape == self.segmaps.shape
assert observed2.shape == self.segmaps.shape
assert (
np.sum(observed1.get_arr()[mask])
> np.sum(observed2.get_arr()[mask]))
assert (
np.sum(observed1.get_arr()[~mask])
< np.sum(observed2.get_arr()[~mask]))
def test_images_low_vs_high_sigma(self):
# test effects of increased sigmas
aug1 = iaa.ElasticTransformation(alpha=3.0, sigma=0.1)
aug2 = iaa.ElasticTransformation(alpha=3.0, sigma=3.0)
observed1 = aug1.augment_image(self.image)
observed2 = aug2.augment_image(self.image)
observed1_std_hori = np.std(
observed1.astype(np.float32)[:, 1:]
- observed1.astype(np.float32)[:, :-1])
observed2_std_hori = np.std(
observed2.astype(np.float32)[:, 1:]
- observed2.astype(np.float32)[:, :-1])
observed1_std_vert = np.std(
observed1.astype(np.float32)[1:, :]
- observed1.astype(np.float32)[:-1, :])
observed2_std_vert = np.std(
observed2.astype(np.float32)[1:, :]
- observed2.astype(np.float32)[:-1, :])
observed1_std = (observed1_std_hori + observed1_std_vert) / 2
observed2_std = (observed2_std_hori + observed2_std_vert) / 2
assert observed1_std > observed2_std
def test_images_alpha_is_stochastic_parameter(self):
# test alpha being iap.Choice
aug = iaa.ElasticTransformation(alpha=iap.Choice([0.001, 5.0]),
sigma=0.25)
seen = [0, 0]
for _ in sm.xrange(100):
observed = aug.augment_image(self.image)
diff = np.average(
np.abs(
self.image.astype(np.float32)
- observed.astype(np.float32)
)
)
if diff < 1.0:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 10
assert seen[1] > 10
def test_sigma_is_stochastic_parameter(self):
# test sigma being iap.Choice
aug = iaa.ElasticTransformation(alpha=3.0,
sigma=iap.Choice([0.01, 5.0]))
seen = [0, 0]
for _ in sm.xrange(100):
observed = aug.augment_image(self.image)
observed_std_hori = np.std(
observed.astype(np.float32)[:, 1:]
- observed.astype(np.float32)[:, :-1])
observed_std_vert = np.std(
observed.astype(np.float32)[1:, :]
- observed.astype(np.float32)[:-1, :])
observed_std = (observed_std_hori + observed_std_vert) / 2
if observed_std > 10.0:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 10
assert seen[1] > 10
# -----------
# cval
# -----------
def test_images_cval_is_int_and_order_is_0(self):
aug = iaa.ElasticTransformation(alpha=30.0, sigma=3.0, mode="constant",
cval=255, order=0)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(observed == 255) > 0
assert np.sum(np.logical_and(0 < observed, observed < 255)) == 0
def test_images_cval_is_int_and_order_is_0_weak_alpha(self):
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant",
cval=0, order=0)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(observed == 255) == 0
def test_images_cval_is_int_and_order_is_2(self):
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant",
cval=255, order=2)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(np.logical_and(0 < observed, observed < 255)) > 0
def test_heatmaps_ignore_cval(self):
# cval with heatmaps
heatmaps = HeatmapsOnImage(
np.zeros((32, 32, 1), dtype=np.float32), shape=(32, 32, 3))
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0,
mode="constant", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
_assert_same_min_max(observed, heatmaps)
assert np.sum(observed.get_arr() > 0.01) == 0
def test_segmaps_ignore_cval(self):
# cval with segmaps
segmaps = SegmentationMapsOnImage(
np.zeros((32, 32, 1), dtype=np.int32), shape=(32, 32, 3))
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant",
cval=255)
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == segmaps.shape
assert np.sum(observed.get_arr() > 0) == 0
# -----------
# keypoints
# -----------
def test_keypoints_no_movement_if_alpha_below_threshold(self):
# for small alpha, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=1.0, sigma=0.0):
kps = [
ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),
ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),
ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),
ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),
ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = aug.augment_keypoints([kpsoi])[0]
d = kpsoi.to_xy_array() - observed.to_xy_array()
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 1e-8
def test_keypoints_no_movement_if_sigma_below_threshold(self):
# for small sigma, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):
kps = [
ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),
ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),
ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),
ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),
ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)
observed = aug.augment_keypoints([kpsoi])[0]
d = kpsoi.to_xy_array() - observed.to_xy_array()
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 1e-8
def test_keypoints_small_movement_for_weak_alpha_if_threshold_zero(self):
# for small alpha (at sigma 1.0), should barely move
# if thresholds set to zero
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):
kps = [
ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),
ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),
ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),
ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),
ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = aug.augment_keypoints([kpsoi])[0]
d = kpsoi.to_xy_array() - observed.to_xy_array()
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 0.5
def test_image_keypoint_alignment(self):
# test alignment between between images and keypoints
image = np.zeros((120, 70), dtype=np.uint8)
s = 3
image[:, 35-s:35+s+1] = 255
kps = [ia.Keypoint(x=35, y=20),
ia.Keypoint(x=35, y=40),
ia.Keypoint(x=35, y=60),
ia.Keypoint(x=35, y=80),
ia.Keypoint(x=35, y=100)]
kpsoi = ia.KeypointsOnImage(kps, shape=image.shape)
aug = iaa.ElasticTransformation(alpha=70, sigma=5)
aug_det = aug.to_deterministic()
images_aug = aug_det.augment_images([image, image])
kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])
count_bad = 0
for image_aug, kpsoi_aug in zip(images_aug, kpsois_aug):
assert kpsoi_aug.shape == (120, 70)
assert len(kpsoi_aug.keypoints) == 5
for kp_aug in kpsoi_aug.keypoints:
x, y = int(np.round(kp_aug.x)), int(np.round(kp_aug.y))
bb = ia.BoundingBox(x1=x-2, x2=x+2+1, y1=y-2, y2=y+2+1)
img_ex = bb.extract_from_image(image_aug)
if np.any(img_ex > 10):
pass # close to expected location
else:
count_bad += 1
assert count_bad <= 1
def test_empty_keypoints(self):
aug = iaa.ElasticTransformation(alpha=10, sigma=10)
kpsoi = ia.KeypointsOnImage([], shape=(10, 10, 3))
kpsoi_aug = aug.augment_keypoints(kpsoi)
assert len(kpsoi_aug.keypoints) == 0
assert kpsoi_aug.shape == (10, 10, 3)
# -----------
# abstract methods for polygons and line strings
# -----------
@classmethod
def _test_cbaois_no_movement_if_alpha_below_threshold(
cls, cba_class, cbaoi_class, augf_name):
# for small alpha, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=1.0, sigma=0.0):
cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])
cbaoi = cbaoi_class([cba], shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = getattr(aug, augf_name)(cbaoi)
assert observed.shape == (50, 50)
assert len(observed.items) == 1
assert observed.items[0].coords_almost_equals(cba)
if hasattr(observed.items[0], "is_valid"):
assert observed.items[0].is_valid
@classmethod
def _test_cbaois_no_movement_if_sigma_below_threshold(
cls, cba_class, cbaoi_class, augf_name):
# for small sigma, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):
cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])
cbaoi = cbaoi_class([cba], shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)
observed = getattr(aug, augf_name)(cbaoi)
assert observed.shape == (50, 50)
assert len(observed.items) == 1
assert observed.items[0].coords_almost_equals(cba)
if hasattr(observed.items[0], "is_valid"):
assert observed.items[0].is_valid
@classmethod
def _test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(
cls, cba_class, cbaoi_class, augf_name):
# for small alpha (at sigma 1.0), should barely move
# if thresholds set to zero
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):
cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])
cbaoi = cbaoi_class([cba], shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = getattr(aug, augf_name)(cbaoi)
assert observed.shape == (50, 50)
assert len(observed.items) == 1
assert observed.items[0].coords_almost_equals(
cba, max_distance=0.5)
if hasattr(observed.items[0], "is_valid"):
assert observed.items[0].is_valid
@classmethod
def _test_image_cbaoi_alignment(cls, cba_class, cbaoi_class, augf_name):
# test alignment between between images and polygons
height_step_size = 50
width_step_size = 30
height_steps = 2 # don't set >2, otherwise polygon will be broken
width_steps = 10
height = (2+height_steps) * height_step_size
width = (2+width_steps) * width_step_size
s = 3
image = np.zeros((height, width), dtype=np.uint8)
points = []
for w in sm.xrange(0, 2+width_steps):
if w not in [0, width_steps+2-1]:
x = width_step_size * w
y = height_step_size
points.append((x, y))
image[y-s:y+s+1, x-s:x+s+1] = 255
for w in sm.xrange(2+width_steps-1, 0, -1):
if w not in [0, width_steps+2-1]:
x = width_step_size * w
y = height_step_size*2
points.append((x, y))
image[y-s:y+s+1, x-s:x+s+1] = 255
cba = cba_class(points)
cbaoi = cbaoi_class([cba], shape=image.shape)
aug = iaa.ElasticTransformation(alpha=100, sigma=7)
aug_det = aug.to_deterministic()
images_aug = aug_det.augment_images([image, image])
cbaois_aug = getattr(aug_det, augf_name)([cbaoi, cbaoi])
count_bad = 0
for image_aug, cbaoi_aug in zip(images_aug, cbaois_aug):
assert cbaoi_aug.shape == image.shape
assert len(cbaoi_aug.items) == 1
for cba_aug in cbaoi_aug.items:
if hasattr(cba_aug, "is_valid"):
assert cba_aug.is_valid
for point_aug in cba_aug.coords:
x, y = point_aug[0], point_aug[1]
bb = ia.BoundingBox(x1=x-2, x2=x+2, y1=y-2, y2=y+2)
img_ex = bb.extract_from_image(image_aug)
if np.any(img_ex > 10):
pass # close to expected location
else:
count_bad += 1
assert count_bad <= 3
@classmethod
def _test_empty_cbaois(cls, cbaoi, augf_name):
aug = iaa.ElasticTransformation(alpha=10, sigma=10)
cbaoi_aug = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(cbaoi_aug, cbaoi)
# -----------
# polygons
# -----------
def test_polygons_no_movement_if_alpha_below_threshold(self):
self._test_cbaois_no_movement_if_alpha_below_threshold(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_polygons_no_movement_if_sigma_below_threshold(self):
self._test_cbaois_no_movement_if_sigma_below_threshold(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_polygons_small_movement_for_weak_alpha_if_threshold_zero(self):
self._test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_image_polygon_alignment(self):
self._test_image_cbaoi_alignment(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_empty_polygons(self):
cbaoi = ia.PolygonsOnImage([], shape=(10, 10, 3))
self._test_empty_cbaois(cbaoi, "augment_polygons")
# -----------
# line strings
# -----------
def test_line_strings_no_movement_if_alpha_below_threshold(self):
self._test_cbaois_no_movement_if_alpha_below_threshold(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_line_strings_no_movement_if_sigma_below_threshold(self):
self._test_cbaois_no_movement_if_sigma_below_threshold(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_line_strings_small_movement_for_weak_alpha_if_threshold_zero(self):
self._test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_image_line_string_alignment(self):
self._test_image_cbaoi_alignment(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_empty_line_strings(self):
cbaoi = ia.LineStringsOnImage([], shape=(10, 10, 3))
self._test_empty_cbaois(cbaoi, "augment_line_strings")
# -----------
# bounding boxes
# -----------
def test_bounding_boxes_no_movement_if_alpha_below_threshold(self):
# for small alpha, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=1.0, sigma=0.0):
bbs = [
ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),
ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = aug.augment_bounding_boxes([bbsoi])[0]
d = bbsoi.to_xyxy_array() - observed.to_xyxy_array()
d = d.reshape((2*2, 2))
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 1e-8
def test_bounding_boxes_no_movement_if_sigma_below_threshold(self):
# for small sigma, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):
bbs = [
ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),
ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)
observed = aug.augment_bounding_boxes([bbsoi])[0]
d = bbsoi.to_xyxy_array() - observed.to_xyxy_array()
d = d.reshape((2*2, 2))
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 1e-8
def test_bounding_boxes_small_movement_for_weak_alpha_if_threshold_zero(
self):
# for small alpha (at sigma 1.0), should barely move
# if thresholds set to zero
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):
bbs = [
ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),
ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = aug.augment_bounding_boxes([bbsoi])[0]
d = bbsoi.to_xyxy_array() - observed.to_xyxy_array()
d = d.reshape((2*2, 2))
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 0.5
def test_image_bounding_box_alignment(self):
# test alignment between between images and bounding boxes
image = np.zeros((100, 100), dtype=np.uint8)
image[35:35+1, 35:65+1] = 255
image[65:65+1, 35:65+1] = 255
image[35:65+1, 35:35+1] = 255
image[35:65+1, 65:65+1] = 255
bbs = [
ia.BoundingBox(x1=35.5, y1=35.5, x2=65.5, y2=65.5)
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=image.shape)
aug = iaa.ElasticTransformation(alpha=70, sigma=5)
images_aug, bbsois_aug = aug(images=[image, image],
bounding_boxes=[bbsoi, bbsoi])
count_bad = 0
for image_aug, bbsoi_aug in zip(images_aug, bbsois_aug):
assert bbsoi_aug.shape == (100, 100)
assert len(bbsoi_aug.bounding_boxes) == 1
for bb_aug in bbsoi_aug.bounding_boxes:
if bb_aug.is_fully_within_image(image_aug):
# top, bottom, left, right
x1 = bb_aug.x1_int
x2 = bb_aug.x2_int
y1 = bb_aug.y1_int
y2 = bb_aug.y2_int
top_row = image_aug[y1-2:y1+2, x1-2:x2+2]
btm_row = image_aug[y2-2:y2+2, x1-2:x2+2]
lft_row = image_aug[y1-2:y2+2, x1-2:x1+2]
rgt_row = image_aug[y1-2:y2+2, x2-2:x2+2]
assert np.max(top_row) > 10
assert np.max(btm_row) > 10
assert np.max(lft_row) > 10
assert np.max(rgt_row) > 10
else:
count_bad += 1
assert count_bad <= 1
def test_empty_bounding_boxes(self):
aug = iaa.ElasticTransformation(alpha=10, sigma=10)
bbsoi = ia.BoundingBoxesOnImage([], shape=(10, 10, 3))
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
assert len(bbsoi_aug.bounding_boxes) == 0
assert bbsoi_aug.shape == (10, 10, 3)
# -----------
# heatmaps alignment
# -----------
def test_image_heatmaps_alignment(self):
# test alignment between images and heatmaps
img = np.zeros((80, 80), dtype=np.uint8)
img[:, 30:50] = 255
img[30:50, :] = 255
hm = HeatmapsOnImage(img.astype(np.float32)/255.0, shape=(80, 80))
aug = iaa.ElasticTransformation(alpha=60.0, sigma=4.0, mode="constant",
cval=0)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
hm_aug = aug_det.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (80, 80)
assert hm_aug.arr_0to1.shape == (80, 80, 1)
assert (same / img_aug_mask.size) >= 0.99
def test_image_heatmaps_alignment_if_heatmaps_smaller_than_image(self):
# test alignment between images and heatmaps
# here with heatmaps that are smaller than the image
img = np.zeros((80, 80), dtype=np.uint8)
img[:, 30:50] = 255
img[30:50, :] = 255
img_small = ia.imresize_single_image(
img, (40, 40), interpolation="nearest")
hm = HeatmapsOnImage(
img_small.astype(np.float32)/255.0,
shape=(80, 80))
aug = iaa.ElasticTransformation(
alpha=60.0, sigma=4.0, mode="constant", cval=0)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
hm_aug = aug_det.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, (80, 80), interpolation="nearest"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (80, 80)
assert hm_aug.arr_0to1.shape == (40, 40, 1)
assert (same / img_aug_mask.size) >= 0.94
# -----------
# segmaps alignment
# -----------
def test_image_segmaps_alignment(self):
# test alignment between images and segmaps
img = np.zeros((80, 80), dtype=np.uint8)
img[:, 30:50] = 255
img[30:50, :] = 255
segmaps = SegmentationMapsOnImage(
(img > 0).astype(np.int32),
shape=(80, 80))
aug = iaa.ElasticTransformation(
alpha=60.0, sigma=4.0, mode="constant", cval=0, order=0)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
segmaps_aug = aug_det.augment_segmentation_maps([segmaps])[0]
img_aug_mask = img_aug > 255*0.1
segmaps_aug_mask = segmaps_aug.arr > 0
same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
assert segmaps_aug.shape == (80, 80)
assert segmaps_aug.arr.shape == (80, 80, 1)
assert (same / img_aug_mask.size) >= 0.99
def test_image_segmaps_alignment_if_heatmaps_smaller_than_image(self):
# test alignment between images and segmaps
# here with segmaps that are smaller than the image
img = np.zeros((80, 80), dtype=np.uint8)
img[:, 30:50] = 255
img[30:50, :] = 255
img_small = ia.imresize_single_image(
img, (40, 40), interpolation="nearest")
segmaps = SegmentationMapsOnImage(
(img_small > 0).astype(np.int32), shape=(80, 80))
aug = iaa.ElasticTransformation(
alpha=60.0, sigma=4.0, mode="constant", cval=0, order=0)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
segmaps_aug = aug_det.augment_segmentation_maps([segmaps])[0]
img_aug_mask = img_aug > 255*0.1
segmaps_aug_mask = ia.imresize_single_image(
segmaps_aug.arr, (80, 80), interpolation="nearest") > 0
same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
assert segmaps_aug.shape == (80, 80)
assert segmaps_aug.arr.shape == (40, 40, 1)
assert (same / img_aug_mask.size) >= 0.94
# ---------
# unusual channel numbers
# ---------
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ElasticTransformation(alpha=2.0, sigma=2.0)
image_aug = aug(image=image)
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# ---------
# zero-sized axes
# ---------
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
for keep_size in [False, True]:
with self.subTest(shape=shape, keep_size=keep_size):
for _ in sm.xrange(3):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ElasticTransformation(alpha=2.0, sigma=2.0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# -----------
# get_parameters
# -----------
def test_get_parameters(self):
aug = iaa.ElasticTransformation(
alpha=0.25, sigma=1.0, order=2, cval=10, mode="constant")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert isinstance(params[3], iap.Deterministic)
assert isinstance(params[4], iap.Deterministic)
assert 0.25 - 1e-8 < params[0].value < 0.25 + 1e-8
assert 1.0 - 1e-8 < params[1].value < 1.0 + 1e-8
assert params[2].value == 2
assert params[3].value == 10
assert params[4].value == "constant"
# -----------
# other dtypes
# -----------
def test_other_dtypes_bool(self):
aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)
mask = np.zeros((21, 21), dtype=bool)
mask[7:13, 7:13] = True
image = np.zeros((21, 21), dtype=bool)
image[mask] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert not np.all(image_aug == 1)
assert np.any(image_aug[~mask] == 1)
def test_other_dtypes_uint_int(self):
aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)
mask = np.zeros((21, 21), dtype=bool)
mask[7:13, 7:13] = True
dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((21, 21), dtype=dtype)
image[7:13, 7:13] = max_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert not np.all(image_aug == max_value)
assert np.any(image_aug[~mask] == max_value)
def test_other_dtypes_float(self):
aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)
mask = np.zeros((21, 21), dtype=bool)
mask[7:13, 7:13] = True
for dtype in ["float16", "float32", "float64"]:
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((21, 21), dtype=dtype)
image[7:13, 7:13] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert not np.all(_isclose(image_aug, np.float128(value)))
assert np.any(_isclose(image_aug[~mask],
np.float128(value)))
def test_other_dtypes_bool_all_orders(self):
mask = np.zeros((50, 50), dtype=bool)
mask[10:40, 20:30] = True
mask[20:30, 10:40] = True
for order in [0, 1, 2, 3, 4, 5]:
aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)
image = np.zeros((50, 50), dtype=bool)
image[mask] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert not np.all(image_aug == 1)
assert np.any(image_aug[~mask] == 1)
def test_other_dtypes_uint_int_all_orders(self):
mask = np.zeros((50, 50), dtype=bool)
mask[10:40, 20:30] = True
mask[20:30, 10:40] = True
for order in [0, 1, 2, 3, 4, 5]:
aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
if order == 0:
dtypes = ["uint8", "uint16", "uint32",
"int8", "int16", "int32"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = max_value - min_value
image = np.zeros((50, 50), dtype=dtype)
image[mask] = max_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
if order == 0:
assert not np.all(image_aug == max_value)
assert np.any(image_aug[~mask] == max_value)
else:
atol = 0.1 * dynamic_range
assert not np.all(
np.isclose(image_aug,
max_value,
rtol=0, atol=atol)
)
assert np.any(
np.isclose(image_aug[~mask],
max_value,
rtol=0, atol=atol))
def test_other_dtypes_float_all_orders(self):
mask = np.zeros((50, 50), dtype=bool)
mask[10:40, 20:30] = True
mask[20:30, 10:40] = True
for order in [0, 1, 2, 3, 4, 5]:
aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
value = (
0.1 * max_value
if dtype != "float64"
else 0.0001 * max_value)
image = np.zeros((50, 50), dtype=dtype)
image[mask] = value
image_aug = aug.augment_image(image)
if order == 0:
assert image_aug.dtype.name == dtype
assert not np.all(
_isclose(image_aug, np.float128(value))
)
assert np.any(
_isclose(image_aug[~mask], np.float128(value))
)
else:
atol = (
10
if dtype == "float16"
else 0.00001 * max_value)
assert not np.all(
np.isclose(
image_aug,
np.float128(value),
rtol=0, atol=atol
))
assert np.any(
np.isclose(
image_aug[~mask],
np.float128(value),
rtol=0, atol=atol
))
class _TwoValueParam(iap.StochasticParameter):
def __init__(self, v1, v2):
super(_TwoValueParam, self).__init__()
self.v1 = v1
self.v2 = v2
def _draw_samples(self, size, random_state):
arr = np.full(size, self.v1, dtype=np.int32)
arr[1::2] = self.v2
return arr
class TestRot90(unittest.TestCase):
@property
def kp_offset(self):
# set this to -1 when using integer-based KP rotation instead of
# subpixel/float-based rotation
return 0
@property
def image(self):
return np.arange(4*4*3).reshape((4, 4, 3)).astype(np.uint8)
@property
def heatmaps(self):
return HeatmapsOnImage(self.image[..., 0:1].astype(np.float32) / 255,
shape=(4, 4, 3))
@property
def heatmaps_smaller(self):
return HeatmapsOnImage(
np.float32([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), shape=(4, 8, 3))
@property
def segmaps(self):
return SegmentationMapsOnImage(
self.image[..., 0:1].astype(np.int32), shape=(4, 4, 3))
@property
def segmaps_smaller(self):
return SegmentationMapsOnImage(
np.int32([[0, 1, 2], [3, 4, 5]]), shape=(4, 8, 3))
@property
def kpsoi(self):
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=2, y=3)]
return ia.KeypointsOnImage(kps, shape=(4, 8, 3))
@property
def psoi(self):
return ia.PolygonsOnImage(
[ia.Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])],
shape=(4, 8, 3)
)
@property
def lsoi(self):
return ia.LineStringsOnImage(
[ia.LineString([(1, 1), (3, 1), (3, 3), (1, 3)])],
shape=(4, 8, 3)
)
@property
def bbsoi(self):
return ia.BoundingBoxesOnImage(
[ia.BoundingBox(x1=1, y1=1, x2=3, y2=3)],
shape=(4, 8, 3)
)
@property
def kpsoi_k1(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_kps = [(4-2+kp_offset, 1),
(4-3+kp_offset, 2)]
kps = [ia.Keypoint(x, y) for x, y in expected_k1_kps]
return ia.KeypointsOnImage(kps, shape=(8, 4, 3))
@property
def kpsoi_k2(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_kps = self.kpsoi_k1.to_xy_array()
expected_k2_kps = [
(8-expected_k1_kps[0][1]+kp_offset, expected_k1_kps[0][0]),
(8-expected_k1_kps[1][1]+kp_offset, expected_k1_kps[1][0])]
kps = [ia.Keypoint(x, y) for x, y in expected_k2_kps]
return ia.KeypointsOnImage(kps, shape=(4, 8, 3))
@property
def kpsoi_k3(self):
# without keep size
kp_offset = self.kp_offset
expected_k2_kps = self.kpsoi_k2.to_xy_array()
expected_k3_kps = [
(4-expected_k2_kps[0][1]+kp_offset, expected_k2_kps[0][0]),
(4-expected_k2_kps[1][1]+kp_offset, expected_k2_kps[1][0])]
kps = [ia.Keypoint(x, y) for x, y in expected_k3_kps]
return ia.KeypointsOnImage(kps, shape=(8, 4, 3))
@property
def psoi_k1(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_polys = [(4-1+kp_offset, 1),
(4-1+kp_offset, 3),
(4-3+kp_offset, 3),
(4-3+kp_offset, 1)]
return ia.PolygonsOnImage([ia.Polygon(expected_k1_polys)],
shape=(8, 4, 3))
@property
def psoi_k2(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_polys = self.psoi_k1.polygons[0].exterior
expected_k2_polys = [
(8-expected_k1_polys[0][1]+kp_offset, expected_k1_polys[0][0]),
(8-expected_k1_polys[1][1]+kp_offset, expected_k1_polys[1][0]),
(8-expected_k1_polys[2][1]+kp_offset, expected_k1_polys[2][0]),
(8-expected_k1_polys[3][1]+kp_offset, expected_k1_polys[3][0])]
return ia.PolygonsOnImage([ia.Polygon(expected_k2_polys)],
shape=(4, 8, 3))
@property
def psoi_k3(self):
# without keep size
kp_offset = self.kp_offset
expected_k2_polys = self.psoi_k2.polygons[0].exterior
expected_k3_polys = [
(4-expected_k2_polys[0][1]+kp_offset, expected_k2_polys[0][0]),
(4-expected_k2_polys[1][1]+kp_offset, expected_k2_polys[1][0]),
(4-expected_k2_polys[2][1]+kp_offset, expected_k2_polys[2][0]),
(4-expected_k2_polys[3][1]+kp_offset, expected_k2_polys[3][0])]
return ia.PolygonsOnImage([ia.Polygon(expected_k3_polys)],
shape=(8, 4, 3))
@property
def lsoi_k1(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_ls = [(4-1+kp_offset, 1),
(4-1+kp_offset, 3),
(4-3+kp_offset, 3),
(4-3+kp_offset, 1)]
return ia.LineStringsOnImage([ia.LineString(expected_k1_ls)],
shape=(8, 4, 3))
@property
def lsoi_k2(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_ls = self.psoi_k1.items[0].coords
expected_k2_ls = [
(8-expected_k1_ls[0][1]+kp_offset, expected_k1_ls[0][0]),
(8-expected_k1_ls[1][1]+kp_offset, expected_k1_ls[1][0]),
(8-expected_k1_ls[2][1]+kp_offset, expected_k1_ls[2][0]),
(8-expected_k1_ls[3][1]+kp_offset, expected_k1_ls[3][0])]
return ia.LineStringsOnImage([ia.LineString(expected_k2_ls)],
shape=(4, 8, 3))
@property
def lsoi_k3(self):
# without keep size
kp_offset = self.kp_offset
expected_k2_ls = self.lsoi_k2.items[0].coords
expected_k3_ls = [
(4-expected_k2_ls[0][1]+kp_offset, expected_k2_ls[0][0]),
(4-expected_k2_ls[1][1]+kp_offset, expected_k2_ls[1][0]),
(4-expected_k2_ls[2][1]+kp_offset, expected_k2_ls[2][0]),
(4-expected_k2_ls[3][1]+kp_offset, expected_k2_ls[3][0])]
return ia.LineStringsOnImage([ia.LineString(expected_k3_ls)],
shape=(8, 4, 3))
@property
def bbsoi_k1(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_coords = [
(4-1+kp_offset, 1),
(4-3+kp_offset, 3)]
return ia.BoundingBoxesOnImage([
ia.BoundingBox(
x1=min(expected_k1_coords[0][0], expected_k1_coords[1][0]),
y1=min(expected_k1_coords[0][1], expected_k1_coords[1][1]),
x2=max(expected_k1_coords[1][0], expected_k1_coords[0][0]),
y2=max(expected_k1_coords[1][1], expected_k1_coords[0][1])
)], shape=(8, 4, 3))
@property
def bbsoi_k2(self):
# without keep size
kp_offset = self.kp_offset
coords = self.bbsoi_k1.bounding_boxes[0].coords
expected_k2_coords = [
(8-coords[0][1]+kp_offset, coords[0][0]),
(8-coords[1][1]+kp_offset, coords[1][0])]
return ia.BoundingBoxesOnImage([
ia.BoundingBox(
x1=min(expected_k2_coords[0][0], expected_k2_coords[1][0]),
y1=min(expected_k2_coords[0][1], expected_k2_coords[1][1]),
x2=max(expected_k2_coords[1][0], expected_k2_coords[0][0]),
y2=max(expected_k2_coords[1][1], expected_k2_coords[0][1])
)],
shape=(4, 8, 3))
@property
def bbsoi_k3(self):
# without keep size
kp_offset = self.kp_offset
coords = self.bbsoi_k2.bounding_boxes[0].coords
expected_k3_coords = [
(4-coords[0][1]+kp_offset, coords[0][0]),
(4-coords[1][1]+kp_offset, coords[1][0])]
return ia.BoundingBoxesOnImage([
ia.BoundingBox(
x1=min(expected_k3_coords[0][0], expected_k3_coords[1][0]),
y1=min(expected_k3_coords[0][1], expected_k3_coords[1][1]),
x2=max(expected_k3_coords[1][0], expected_k3_coords[0][0]),
y2=max(expected_k3_coords[1][1], expected_k3_coords[0][1])
)],
shape=(8, 4, 3))
def test___init___k_is_list(self):
aug = iaa.Rot90([1, 3])
assert isinstance(aug.k, iap.Choice)
assert len(aug.k.a) == 2
assert aug.k.a[0] == 1
assert aug.k.a[1] == 3
def test___init___k_is_all(self):
aug = iaa.Rot90(ia.ALL)
assert isinstance(aug.k, iap.Choice)
assert len(aug.k.a) == 4
assert aug.k.a == [0, 1, 2, 3]
def test_images_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
img_aug = aug.augment_image(self.image)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug, self.image)
def test_heatmaps_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_aug = aug.augment_heatmaps([self.heatmaps])[0]
assert (hms_aug.arr_0to1.dtype.name
== self.heatmaps.arr_0to1.dtype.name)
assert np.allclose(hms_aug.arr_0to1, self.heatmaps.arr_0to1)
assert hms_aug.shape == self.heatmaps.shape
def test_segmaps_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_aug = aug.augment_segmentation_maps(
[self.segmaps]
)[0]
assert (
segmaps_aug.arr.dtype.name
== self.segmaps.arr.dtype.name)
assert np.allclose(segmaps_aug.arr, self.segmaps.arr)
assert segmaps_aug.shape == self.segmaps.shape
def test_keypoints_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi)
def test_polygons_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi)
def test_line_strings_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi)
def test_bounding_boxes_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi)
def test_images_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
img_aug = aug.augment_image(self.image)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug,
np.rot90(self.image, 1, axes=(1, 0)))
def test_heatmaps_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_aug = aug.augment_heatmaps([self.heatmaps])[0]
assert (hms_aug.arr_0to1.dtype.name
== self.heatmaps.arr_0to1.dtype.name)
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(self.heatmaps.arr_0to1, 1, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_smaller_aug = aug.augment_heatmaps(
[self.heatmaps_smaller]
)[0]
assert (
hms_smaller_aug.arr_0to1.dtype.name
== self.heatmaps_smaller.arr_0to1.dtype.name)
assert np.allclose(
hms_smaller_aug.arr_0to1,
np.rot90(self.heatmaps_smaller.arr_0to1, 1, axes=(1, 0)))
assert hms_smaller_aug.shape == (8, 4, 3)
def test_segmaps_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_aug = aug.augment_segmentation_maps(
[self.segmaps]
)[0]
assert (
segmaps_aug.arr.dtype.name
== self.segmaps.arr.dtype.name)
assert np.allclose(
segmaps_aug.arr,
np.rot90(self.segmaps.arr, 1, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_smaller_aug = aug.augment_segmentation_maps(
self.segmaps_smaller)
assert (
segmaps_smaller_aug.arr.dtype.name
== self.segmaps_smaller.arr.dtype.name)
assert np.allclose(
segmaps_smaller_aug.arr,
np.rot90(self.segmaps_smaller.arr, 1, axes=(1, 0)))
assert segmaps_smaller_aug.shape == (8, 4, 3)
def test_keypoints_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi_k1)
def test_polygons_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi_k1)
def test_line_strings_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi_k1)
def test_bounding_boxes_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi_k1)
def test_images_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
img = self.image
img_aug = aug.augment_image(img)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug, np.rot90(img, 2, axes=(1, 0)))
def test_heatmaps_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
hms = self.heatmaps
hms_aug = aug.augment_heatmaps([hms])[0]
assert hms_aug.arr_0to1.dtype.name == hms.arr_0to1.dtype.name
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(hms.arr_0to1, 2, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
hms_smaller = self.heatmaps_smaller
hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]
assert (hms_smaller_aug.arr_0to1.dtype.name
== hms_smaller.arr_0to1.dtype.name)
assert np.allclose(
hms_smaller_aug.arr_0to1,
np.rot90(hms_smaller.arr_0to1, 2, axes=(1, 0)))
assert hms_smaller_aug.shape == (4, 8, 3)
def test_segmaps_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
segmaps = self.segmaps
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
assert segmaps_aug.arr.dtype.name == segmaps.arr.dtype.name
assert np.allclose(
segmaps_aug.arr,
np.rot90(segmaps.arr, 2, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
segmaps_smaller = self.segmaps_smaller
segmaps_smaller_aug = aug.augment_segmentation_maps(segmaps_smaller)
assert (segmaps_smaller_aug.arr.dtype.name
== segmaps_smaller.arr.dtype.name)
assert np.allclose(
segmaps_smaller_aug.arr,
np.rot90(segmaps_smaller.arr, 2, axes=(1, 0)))
assert segmaps_smaller_aug.shape == (4, 8, 3)
def test_keypoints_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi_k2)
def test_polygons_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi_k2)
def test_line_strings_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi_k2)
def test_bounding_boxes_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi_k2)
def test_images_k_is_3_and_minus1(self):
img = self.image
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
img_aug = aug.augment_image(img)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug, np.rot90(img, 3, axes=(1, 0)))
def test_heatmaps_k_is_3_and_minus1(self):
hms = self.heatmaps
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_aug = aug.augment_heatmaps([hms])[0]
assert (hms_aug.arr_0to1.dtype.name
== hms.arr_0to1.dtype.name)
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(hms.arr_0to1, 3, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_3_and_minus1(self):
hms_smaller = self.heatmaps_smaller
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]
assert (hms_smaller_aug.arr_0to1.dtype.name
== hms_smaller.arr_0to1.dtype.name)
assert np.allclose(
hms_smaller_aug.arr_0to1,
np.rot90(hms_smaller.arr_0to1, 3, axes=(1, 0)))
assert hms_smaller_aug.shape == (8, 4, 3)
def test_segmaps_k_is_3_and_minus1(self):
segmaps = self.segmaps
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
assert (segmaps_aug.arr.dtype.name
== segmaps.arr.dtype.name)
assert np.allclose(
segmaps_aug.arr,
np.rot90(segmaps.arr, 3, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_3_and_minus1(self):
segmaps_smaller = self.segmaps_smaller
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_smaller_aug = aug.augment_segmentation_maps(
segmaps_smaller)
assert (segmaps_smaller_aug.arr.dtype.name
== segmaps_smaller.arr.dtype.name)
assert np.allclose(
segmaps_smaller_aug.arr,
np.rot90(segmaps_smaller.arr, 3, axes=(1, 0)))
assert segmaps_smaller_aug.shape == (8, 4, 3)
def test_keypoints_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi_k3)
def test_polygons_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi_k3)
def test_line_strings_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi_k3)
def test_bounding_boxes_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi_k3)
def test_images_k_is_1_verify_without_using_numpy_rot90(self):
# verify once without np.rot90
aug = iaa.Rot90(k=1, keep_size=False)
image = np.uint8([[1, 0, 0],
[0, 2, 0]])
img_aug = aug.augment_image(image)
expected = np.uint8([[0, 1], [2, 0], [0, 0]])
assert np.array_equal(img_aug, expected)
def test_images_k_is_1_keep_size_is_true(self):
# keep_size=True, k=1
aug = iaa.Rot90(1, keep_size=True)
img_nonsquare = np.arange(5*4*3).reshape((5, 4, 3)).astype(np.uint8)
img_aug = aug.augment_image(img_nonsquare)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(
img_aug,
ia.imresize_single_image(
np.rot90(img_nonsquare, 1, axes=(1, 0)),
(5, 4)
)
)
def test_heatmaps_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
hms = self.heatmaps
hms_aug = aug.augment_heatmaps([hms])[0]
assert hms_aug.arr_0to1.dtype.name == hms.arr_0to1.dtype.name
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(hms.arr_0to1, 1, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
hms_smaller = self.heatmaps_smaller
hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]
hms_smaller_rot = np.rot90(hms_smaller.arr_0to1, 1, axes=(1, 0))
hms_smaller_rot = np.clip(
ia.imresize_single_image(
hms_smaller_rot, (2, 3), interpolation="cubic"
),
0.0, 1.0)
assert (hms_smaller_aug.arr_0to1.dtype.name
== hms_smaller.arr_0to1.dtype.name)
assert np.allclose(hms_smaller_aug.arr_0to1, hms_smaller_rot)
assert hms_smaller_aug.shape == (4, 8, 3)
def test_segmaps_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
segmaps = self.segmaps
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
assert (segmaps_aug.arr.dtype.name
== segmaps.arr.dtype.name)
assert np.allclose(segmaps_aug.arr,
np.rot90(segmaps.arr, 1, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
segmaps_smaller = self.segmaps_smaller
segmaps_smaller_aug = aug.augment_segmentation_maps(segmaps_smaller)
segmaps_smaller_rot = np.rot90(segmaps_smaller.arr, 1, axes=(1, 0))
segmaps_smaller_rot = ia.imresize_single_image(
segmaps_smaller_rot, (2, 3), interpolation="nearest")
assert (segmaps_smaller_aug.arr.dtype.name
== segmaps_smaller.arr.dtype.name)
assert np.allclose(segmaps_smaller_aug.arr, segmaps_smaller_rot)
assert segmaps_smaller_aug.shape == (4, 8, 3)
def test_keypoints_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
kp_offset = self.kp_offset
kpsoi = self.kpsoi
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
expected = [(4-2+kp_offset, 1), (4-3+kp_offset, 2)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
assert kpsoi_aug.shape == (4, 8, 3)
for kp_aug, kp in zip(kpsoi_aug.keypoints, expected):
assert np.allclose([kp_aug.x, kp_aug.y], [kp[0], kp[1]])
def test_polygons_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
psoi = self.psoi
kp_offset = self.kp_offset
psoi_aug = aug.augment_polygons(psoi)
expected = [(4-1+kp_offset, 1), (4-1+kp_offset, 3),
(4-3+kp_offset, 3), (4-3+kp_offset, 1)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
assert psoi_aug.shape == (4, 8, 3)
assert len(psoi_aug.polygons) == 1
assert psoi_aug.polygons[0].is_valid
assert psoi_aug.polygons[0].exterior_almost_equals(expected)
def test_line_strings_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
lsoi = self.lsoi
kp_offset = self.kp_offset
lsoi_aug = aug.augment_line_strings(lsoi)
expected = [(4-1+kp_offset, 1), (4-1+kp_offset, 3),
(4-3+kp_offset, 3), (4-3+kp_offset, 1)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
assert lsoi_aug.shape == (4, 8, 3)
assert len(lsoi_aug.items) == 1
assert lsoi_aug.items[0].coords_almost_equals(expected)
def test_bounding_boxes_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
bbsoi = self.bbsoi
kp_offset = self.kp_offset
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
expected = [(4-1+kp_offset, 1),
(4-3+kp_offset, 3)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
expected = np.float32([
[min(expected[0][0], expected[1][0]),
min(expected[0][1], expected[1][1])],
[max(expected[0][0], expected[1][0]),
max(expected[0][1], expected[1][1])]
])
assert bbsoi_aug.shape == (4, 8, 3)
assert len(bbsoi_aug.bounding_boxes) == 1
assert bbsoi_aug.bounding_boxes[0].coords_almost_equals(expected)
def test_images_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
img = self.image
imgs_aug = aug.augment_images([img] * 4)
assert np.array_equal(imgs_aug[0], np.rot90(img, 1, axes=(1, 0)))
assert np.array_equal(imgs_aug[1], np.rot90(img, 2, axes=(1, 0)))
assert np.array_equal(imgs_aug[2], np.rot90(img, 1, axes=(1, 0)))
assert np.array_equal(imgs_aug[3], np.rot90(img, 2, axes=(1, 0)))
def test_heatmaps_smaller_than_image_k_is_list(self):
def _rot_hm(hm, k):
return np.rot90(hm.arr_0to1, k, axes=(1, 0))
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
hms_smaller = self.heatmaps_smaller
hms_aug = aug.augment_heatmaps([hms_smaller] * 4)
assert hms_aug[0].shape == (8, 4, 3)
assert hms_aug[1].shape == (4, 8, 3)
assert hms_aug[2].shape == (8, 4, 3)
assert hms_aug[3].shape == (4, 8, 3)
assert np.allclose(hms_aug[0].arr_0to1, _rot_hm(hms_smaller, 1))
assert np.allclose(hms_aug[1].arr_0to1, _rot_hm(hms_smaller, 2))
assert np.allclose(hms_aug[2].arr_0to1, _rot_hm(hms_smaller, 1))
assert np.allclose(hms_aug[3].arr_0to1, _rot_hm(hms_smaller, 2))
def test_segmaps_smaller_than_image_k_is_list(self):
def _rot_sm(segmap, k):
return np.rot90(segmap.arr, k, axes=(1, 0))
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
segmaps_smaller = self.segmaps_smaller
segmaps_aug = aug.augment_segmentation_maps([segmaps_smaller] * 4)
assert segmaps_aug[0].shape == (8, 4, 3)
assert segmaps_aug[1].shape == (4, 8, 3)
assert segmaps_aug[2].shape == (8, 4, 3)
assert segmaps_aug[3].shape == (4, 8, 3)
assert np.allclose(segmaps_aug[0].arr, _rot_sm(segmaps_smaller, 1))
assert np.allclose(segmaps_aug[1].arr, _rot_sm(segmaps_smaller, 2))
assert np.allclose(segmaps_aug[2].arr, _rot_sm(segmaps_smaller, 1))
assert np.allclose(segmaps_aug[3].arr, _rot_sm(segmaps_smaller, 2))
def test_keypoints_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
kpsoi = self.kpsoi
kpsoi_aug = aug.augment_keypoints([kpsoi] * 4)
assert_cbaois_equal(kpsoi_aug[0], self.kpsoi_k1)
assert_cbaois_equal(kpsoi_aug[1], self.kpsoi_k2)
assert_cbaois_equal(kpsoi_aug[2], self.kpsoi_k1)
assert_cbaois_equal(kpsoi_aug[3], self.kpsoi_k2)
def test_polygons_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
psoi = self.psoi
psoi_aug = aug.augment_polygons([psoi] * 4)
assert_cbaois_equal(psoi_aug[0], self.psoi_k1)
assert_cbaois_equal(psoi_aug[1], self.psoi_k2)
assert_cbaois_equal(psoi_aug[2], self.psoi_k1)
assert_cbaois_equal(psoi_aug[3], self.psoi_k2)
def test_line_strings_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
lsoi = self.lsoi
lsoi_aug = aug.augment_line_strings([lsoi] * 4)
assert_cbaois_equal(lsoi_aug[0], self.lsoi_k1)
assert_cbaois_equal(lsoi_aug[1], self.lsoi_k2)
assert_cbaois_equal(lsoi_aug[2], self.lsoi_k1)
assert_cbaois_equal(lsoi_aug[3], self.lsoi_k2)
def test_bounding_boxes_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
bbsoi = self.bbsoi
bbsoi_aug = aug.augment_bounding_boxes([bbsoi] * 4)
assert_cbaois_equal(bbsoi_aug[0], self.bbsoi_k1)
assert_cbaois_equal(bbsoi_aug[1], self.bbsoi_k2)
assert_cbaois_equal(bbsoi_aug[2], self.bbsoi_k1)
assert_cbaois_equal(bbsoi_aug[3], self.bbsoi_k2)
def test_empty_keypoints(self):
aug = iaa.Rot90(k=1, keep_size=False)
kpsoi = ia.KeypointsOnImage([], shape=(4, 8, 3))
kpsoi_aug = aug.augment_keypoints(kpsoi)
expected = self.kpsoi_k1
expected.keypoints = []
assert_cbaois_equal(kpsoi_aug, expected)
def test_empty_polygons(self):
aug = iaa.Rot90(k=1, keep_size=False)
psoi = ia.PolygonsOnImage([], shape=(4, 8, 3))
psoi_aug = aug.augment_polygons(psoi)
expected = self.psoi_k1
expected.polygons = []
assert_cbaois_equal(psoi_aug, expected)
def test_empty_line_strings(self):
aug = iaa.Rot90(k=1, keep_size=False)
lsoi = ia.LineStringsOnImage([], shape=(4, 8, 3))
lsoi_aug = aug.augment_line_strings(lsoi)
expected = self.lsoi_k1
expected.line_strings = []
assert_cbaois_equal(lsoi_aug, expected)
def test_empty_bounding_boxes(self):
aug = iaa.Rot90(k=1, keep_size=False)
bbsoi = ia.BoundingBoxesOnImage([], shape=(4, 8, 3))
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
expected = self.bbsoi_k1
expected.bounding_boxes = []
assert_cbaois_equal(bbsoi_aug, expected)
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90(k=1)
image_aug = aug(image=image)
shape_expected = tuple([shape[1], shape[0]] + list(shape[2:]))
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape_expected
def test_zero_sized_axes_k_0_or_2(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
for keep_size in [False, True]:
with self.subTest(shape=shape, keep_size=keep_size):
for _ in sm.xrange(10):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90([0, 2], keep_size=keep_size)
image_aug = aug(image=image)
assert image_aug.shape == shape
def test_zero_sized_axes_k_1_or_3_no_keep_size(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
for _ in sm.xrange(10):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90([1, 3], keep_size=False)
image_aug = aug(image=image)
shape_expected = tuple([shape[1], shape[0]]
+ list(shape[2:]))
assert image_aug.shape == shape_expected
def test_zero_sized_axes_k_1_or_3_keep_size(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
for _ in sm.xrange(10):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90([1, 3], keep_size=True)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
def test_get_parameters(self):
aug = iaa.Rot90([1, 3], keep_size=False)
assert aug.get_parameters()[0] == aug.k
assert aug.get_parameters()[1] is False
def test_other_dtypes_bool(self):
aug = iaa.Rot90(2)
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug[0, 0] == 0)
assert np.all(image_aug[2, 2] == 1)
def test_other_dtypes_uint_int(self):
aug = iaa.Rot90(2)
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = max_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug[0, 0] == 0)
assert np.all(image_aug[2, 2] == max_value)
def test_other_dtypes_float(self):
aug = iaa.Rot90(2)
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
def _allclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.allclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0, 1.0, 10.0, 100.0, 500 ** (isize-1), 1000 ** (isize-1)]
values = values + [(-1) * value for value in values]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert _allclose(image_aug[0, 0], 0)
assert _allclose(image_aug[2, 2], np.float128(value))
Add test to verify correct BB shape after aug in PerspT
from __future__ import print_function, division, absolute_import
import itertools
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage.morphology
import cv2
import imgaug as ia
from imgaug import random as iarandom
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (
array_equal_lists, keypoints_equal, reseed, assert_cbaois_equal)
from imgaug.augmentables.heatmaps import HeatmapsOnImage
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
def _assert_same_min_max(observed, actual):
assert np.isclose(observed.min_value, actual.min_value, rtol=0, atol=1e-6)
assert np.isclose(observed.max_value, actual.max_value, rtol=0, atol=1e-6)
def _assert_same_shape(observed, actual):
assert observed.shape == actual.shape
# TODO add more tests for Affine .mode
# TODO add more tests for Affine shear
class TestAffine(unittest.TestCase):
def test_get_parameters(self):
aug = iaa.Affine(scale=1, translate_px=2, rotate=3, shear=4,
order=1, cval=0, mode="constant", backend="cv2",
fit_output=True)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic) # scale
assert isinstance(params[1], iap.Deterministic) # translate
assert isinstance(params[2], iap.Deterministic) # rotate
assert isinstance(params[3], iap.Deterministic) # shear
assert params[0].value == 1 # scale
assert params[1].value == 2 # translate
assert params[2].value == 3 # rotate
assert params[3].value == 4 # shear
assert params[4].value == 1 # order
assert params[5].value == 0 # cval
assert params[6].value == "constant" # mode
assert params[7] == "cv2" # backend
assert params[8] is True # fit_output
class TestAffine___init__(unittest.TestCase):
def test___init___scale_is_stochastic_parameter(self):
aug = iaa.Affine(scale=iap.Uniform(0.7, 0.9))
assert isinstance(aug.scale, iap.Uniform)
assert isinstance(aug.scale.a, iap.Deterministic)
assert isinstance(aug.scale.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8
def test___init___translate_percent_is_stochastic_parameter(self):
aug = iaa.Affine(translate_percent=iap.Uniform(0.7, 0.9))
assert isinstance(aug.translate, iap.Uniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.translate.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.translate.b.value < 0.9 + 1e-8
def test___init___translate_px_is_stochastic_parameter(self):
aug = iaa.Affine(translate_px=iap.DiscreteUniform(1, 10))
assert isinstance(aug.translate, iap.DiscreteUniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert aug.translate.a.value == 1
assert aug.translate.b.value == 10
def test___init___rotate_is_stochastic_parameter(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=iap.Uniform(10, 20),
shear=0)
assert isinstance(aug.rotate, iap.Uniform)
assert isinstance(aug.rotate.a, iap.Deterministic)
assert aug.rotate.a.value == 10
assert isinstance(aug.rotate.b, iap.Deterministic)
assert aug.rotate.b.value == 20
def test___init___shear_is_stochastic_parameter(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0,
shear=iap.Uniform(10, 20))
assert isinstance(aug.shear, iap.Uniform)
assert isinstance(aug.shear.a, iap.Deterministic)
assert aug.shear.a.value == 10
assert isinstance(aug.shear.b, iap.Deterministic)
assert aug.shear.b.value == 20
def test___init___cval_is_all(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=ia.ALL)
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
def test___init___cval_is_stochastic_parameter(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=iap.DiscreteUniform(1, 5))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 1
assert aug.cval.b.value == 5
def test___init___mode_is_all(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
def test___init___mode_is_string(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode="edge")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "edge"
def test___init___mode_is_list(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=["constant", "edge"])
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "constant" in aug.mode.a
and "edge" in aug.mode.a)
def test___init___mode_is_stochastic_parameter(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=iap.Choice(["constant", "edge"]))
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "constant" in aug.mode.a
and "edge" in aug.mode.a)
def test___init___fit_output_is_true(self):
aug = iaa.Affine(fit_output=True)
assert aug.fit_output is True
# ------------
# exceptions for bad inputs
# ------------
def test___init___bad_datatype_for_scale_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(scale=False)
def test___init___bad_datatype_for_translate_px_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(translate_px=False)
def test___init___bad_datatype_for_translate_percent_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(translate_percent=False)
def test___init___bad_datatype_for_rotate_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(scale=1.0, translate_px=0, rotate=False, shear=0,
cval=0)
def test___init___bad_datatype_for_shear_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=False,
cval=0)
def test___init___bad_datatype_for_cval_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=None)
def test___init___bad_datatype_for_mode_fails(self):
with self.assertRaises(Exception):
_ = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=False)
def test___init___bad_datatype_for_order_fails(self):
# bad order datatype in case of backend=cv2
with self.assertRaises(Exception):
_ = iaa.Affine(backend="cv2", order="test")
def test___init___nonexistent_order_for_cv2_fails(self):
# non-existent order in case of backend=cv2
with self.assertRaises(AssertionError):
_ = iaa.Affine(backend="cv2", order=-1)
# TODO add test with multiple images
class TestAffine_noop(unittest.TestCase):
def setUp(self):
reseed()
@property
def base_img(self):
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
return base_img[:, :, np.newaxis]
@property
def images(self):
return np.array([self.base_img])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
return [ia.KeypointsOnImage(kps, shape=self.base_img.shape)]
@property
def psoi(self):
polygons = [ia.Polygon([(0, 0), (2, 0), (2, 2)])]
return [ia.PolygonsOnImage(polygons, shape=self.base_img.shape)]
@property
def lsoi(self):
ls = [ia.LineString([(0, 0), (2, 0), (2, 2)])]
return [ia.LineStringsOnImage(ls, shape=self.base_img.shape)]
@property
def bbsoi(self):
bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.base_img.shape)]
def test_image_noop(self):
# no translation/scale/rotate/shear, shouldnt change nothing
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_image_noop__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_image_noop__list(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images([self.base_img])
expected = [self.base_img]
assert array_equal_lists(observed, expected)
def test_image_noop__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.base_img])
expected = [self.base_img]
assert array_equal_lists(observed, expected)
def test_keypoints_noop(self):
self._test_cba_noop("augment_keypoints", self.kpsoi, False)
def test_keypoints_noop__deterministic(self):
self._test_cba_noop("augment_keypoints", self.kpsoi, True)
def test_polygons_noop(self):
self._test_cba_noop("augment_polygons", self.psoi, False)
def test_polygons_noop__deterministic(self):
self._test_cba_noop("augment_polygons", self.psoi, True)
def test_line_strings_noop(self):
self._test_cba_noop("augment_line_strings", self.lsoi, False)
def test_line_strings_noop__deterministic(self):
self._test_cba_noop("augment_line_strings", self.lsoi, True)
def test_bounding_boxes_noop(self):
self._test_cba_noop("augment_bounding_boxes", self.bbsoi, False)
def test_bounding_boxes_noop__deterministic(self):
self._test_cba_noop("augment_bounding_boxes", self.bbsoi, True)
@classmethod
def _test_cba_noop(cls, augf_name, cbaoi, deterministic):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
expected = cbaoi
assert_cbaois_equal(observed, expected)
# TODO add test with multiple images
class TestAffine_scale(unittest.TestCase):
def setUp(self):
reseed()
# ---------------------
# scale: zoom in
# ---------------------
@property
def base_img(self):
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
return base_img[:, :, np.newaxis]
@property
def images(self):
return np.array([self.base_img])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
return [ia.KeypointsOnImage(kps, shape=self.base_img.shape)]
def kpsoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 0],
[1, 1],
[2, 2]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.KeypointsOnImage.from_xy_array(
coords_scaled,
shape=self.base_img.shape)]
@property
def psoi(self):
polys = [ia.Polygon([(0, 0), (0, 2), (2, 2)])]
return [ia.PolygonsOnImage(polys, shape=self.base_img.shape)]
def psoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 0],
[0, 2],
[2, 2]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.PolygonsOnImage(
[ia.Polygon(coords_scaled)],
shape=self.base_img.shape)]
@property
def lsoi(self):
ls = [ia.LineString([(0, 0), (0, 2), (2, 2)])]
return [ia.LineStringsOnImage(ls, shape=self.base_img.shape)]
def lsoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 0],
[0, 2],
[2, 2]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.LineStringsOnImage(
[ia.LineString(coords_scaled)],
shape=self.base_img.shape)]
@property
def bbsoi(self):
bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.base_img.shape)]
def bbsoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 1],
[2, 3]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.BoundingBoxesOnImage.from_xyxy_array(
coords_scaled.reshape((1, 4)),
shape=self.base_img.shape)]
def _scale_coordinates(self, coords, scale_y, scale_x):
height, width = self.base_img.shape[0:2]
coords_scaled = []
for x, y in coords:
# the additional +0.5 and -0.5 here makes up for the shift factor
# used in the affine matrix generation
offset = 0.0
x_centered = x - width/2 + offset
y_centered = y - height/2 + offset
x_new = x_centered * scale_x + width/2 - offset
y_new = y_centered * scale_y + height/2 - offset
coords_scaled.append((x_new, y_new))
return np.float32(coords_scaled)
@property
def scale_zoom_in_outer_pixels(self):
base_img = self.base_img
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
return outer_pixels
def test_image_scale_zoom_in(self):
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images(self.images)
outer_pixels = self.scale_zoom_in_outer_pixels
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
def test_image_scale_zoom_in__deterministic(self):
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
outer_pixels = self.scale_zoom_in_outer_pixels
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
def test_image_scale_zoom_in__list(self):
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images([self.base_img])
outer_pixels = self.scale_zoom_in_outer_pixels
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
def test_image_scale_zoom_in__list_and_deterministic(self):
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.base_img])
outer_pixels = self.scale_zoom_in_outer_pixels
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
def test_keypoints_scale_zoom_in(self):
self._test_cba_scale(
"augment_keypoints", 1.75,
self.kpsoi, self.kpsoi_scaled(1.75, 1.75), False)
def test_keypoints_scale_zoom_in__deterministic(self):
self._test_cba_scale(
"augment_keypoints", 1.75,
self.kpsoi, self.kpsoi_scaled(1.75, 1.75), True)
def test_polygons_scale_zoom_in(self):
self._test_cba_scale(
"augment_polygons", 1.75,
self.psoi, self.psoi_scaled(1.75, 1.75), False)
def test_polygons_scale_zoom_in__deterministic(self):
self._test_cba_scale(
"augment_polygons", 1.75,
self.psoi, self.psoi_scaled(1.75, 1.75), True)
def test_line_strings_scale_zoom_in(self):
self._test_cba_scale(
"augment_line_strings", 1.75,
self.lsoi, self.lsoi_scaled(1.75, 1.75), False)
def test_line_strings_scale_zoom_in__deterministic(self):
self._test_cba_scale(
"augment_line_strings", 1.75,
self.lsoi, self.lsoi_scaled(1.75, 1.75), True)
def test_bounding_boxes_scale_zoom_in(self):
self._test_cba_scale(
"augment_bounding_boxes", 1.75,
self.bbsoi, self.bbsoi_scaled(1.75, 1.75), False)
def test_bounding_boxes_scale_zoom_in__deterministic(self):
self._test_cba_scale(
"augment_bounding_boxes", 1.75,
self.bbsoi, self.bbsoi_scaled(1.75, 1.75), True)
@classmethod
def _test_cba_scale(cls, augf_name, scale, cbaoi, cbaoi_scaled,
deterministic):
aug = iaa.Affine(scale=scale, translate_px=0, rotate=0, shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_scaled)
# ---------------------
# scale: zoom in only on x axis
# ---------------------
def test_image_scale_zoom_in_only_x_axis(self):
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
translate_px=0, rotate=0, shear=0)
observed = aug.augment_images(self.images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
def test_image_scale_zoom_in_only_x_axis__deterministic(self):
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
def test_image_scale_zoom_in_only_x_axis__list(self):
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
translate_px=0, rotate=0, shear=0)
observed = aug.augment_images([self.base_img])
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
def test_image_scale_zoom_in_only_x_axis__deterministic_and_list(self):
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.base_img])
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
def test_keypoints_scale_zoom_in_only_x_axis(self):
self._test_cba_scale(
"augment_keypoints", {"y": 1.0, "x": 1.75}, self.kpsoi,
self.kpsoi_scaled(1.0, 1.75), False)
def test_keypoints_scale_zoom_in_only_x_axis__deterministic(self):
self._test_cba_scale(
"augment_keypoints", {"y": 1.0, "x": 1.75}, self.kpsoi,
self.kpsoi_scaled(1.0, 1.75), True)
def test_polygons_scale_zoom_in_only_x_axis(self):
self._test_cba_scale(
"augment_polygons", {"y": 1.0, "x": 1.75}, self.psoi,
self.psoi_scaled(1.0, 1.75), False)
def test_polygons_scale_zoom_in_only_x_axis__deterministic(self):
self._test_cba_scale(
"augment_polygons", {"y": 1.0, "x": 1.75}, self.psoi,
self.psoi_scaled(1.0, 1.75), True)
def test_line_strings_scale_zoom_in_only_x_axis(self):
self._test_cba_scale(
"augment_line_strings", {"y": 1.0, "x": 1.75}, self.lsoi,
self.lsoi_scaled(1.0, 1.75), False)
def test_line_strings_scale_zoom_in_only_x_axis__deterministic(self):
self._test_cba_scale(
"augment_line_strings", {"y": 1.0, "x": 1.75}, self.lsoi,
self.lsoi_scaled(1.0, 1.75), True)
def test_bounding_boxes_scale_zoom_in_only_x_axis(self):
self._test_cba_scale(
"augment_bounding_boxes", {"y": 1.0, "x": 1.75}, self.bbsoi,
self.bbsoi_scaled(1.0, 1.75), False)
def test_bounding_boxes_scale_zoom_in_only_x_axis__deterministic(self):
self._test_cba_scale(
"augment_bounding_boxes", {"y": 1.0, "x": 1.75}, self.bbsoi,
self.bbsoi_scaled(1.0, 1.75), True)
# ---------------------
# scale: zoom in only on y axis
# ---------------------
def test_image_scale_zoom_in_only_y_axis(self):
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
translate_px=0, rotate=0, shear=0)
observed = aug.augment_images(self.images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
def test_image_scale_zoom_in_only_y_axis__deterministic(self):
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
def test_image_scale_zoom_in_only_y_axis__list(self):
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
translate_px=0, rotate=0, shear=0)
observed = aug.augment_images([self.base_img])
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
def test_image_scale_zoom_in_only_y_axis__deterministic_and_list(self):
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.base_img])
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
def test_keypoints_scale_zoom_in_only_y_axis(self):
self._test_cba_scale(
"augment_keypoints", {"y": 1.75, "x": 1.0}, self.kpsoi,
self.kpsoi_scaled(1.75, 1.0), False)
def test_keypoints_scale_zoom_in_only_y_axis__deterministic(self):
self._test_cba_scale(
"augment_keypoints", {"y": 1.75, "x": 1.0}, self.kpsoi,
self.kpsoi_scaled(1.75, 1.0), True)
def test_polygons_scale_zoom_in_only_y_axis(self):
self._test_cba_scale(
"augment_polygons", {"y": 1.75, "x": 1.0}, self.psoi,
self.psoi_scaled(1.75, 1.0), False)
def test_polygons_scale_zoom_in_only_y_axis__deterministic(self):
self._test_cba_scale(
"augment_polygons", {"y": 1.75, "x": 1.0}, self.psoi,
self.psoi_scaled(1.75, 1.0), True)
def test_line_strings_scale_zoom_in_only_y_axis(self):
self._test_cba_scale(
"augment_polygons", {"y": 1.75, "x": 1.0}, self.psoi,
self.psoi_scaled(1.75, 1.0), False)
def test_line_strings_scale_zoom_in_only_y_axis__deterministic(self):
self._test_cba_scale(
"augment_line_strings", {"y": 1.75, "x": 1.0}, self.lsoi,
self.lsoi_scaled(1.75, 1.0), True)
def test_bounding_boxes_scale_zoom_in_only_y_axis(self):
self._test_cba_scale(
"augment_bounding_boxes", {"y": 1.75, "x": 1.0}, self.bbsoi,
self.bbsoi_scaled(1.75, 1.0), False)
def test_bounding_boxes_scale_zoom_in_only_y_axis__deterministic(self):
self._test_cba_scale(
"augment_bounding_boxes", {"y": 1.75, "x": 1.0}, self.bbsoi,
self.bbsoi_scaled(1.75, 1.0), True)
# ---------------------
# scale: zoom out
# ---------------------
# these tests use a 4x4 area of all 255, which is zoomed out to a 4x4 area
# in which the center 2x2 area is 255
# zoom in should probably be adapted to this style
# no separate tests here for x/y axis, should work fine if zoom in works
# with that
@property
def scale_zoom_out_base_img(self):
return np.ones((4, 4, 1), dtype=np.uint8) * 255
@property
def scale_zoom_out_images(self):
return np.array([self.scale_zoom_out_base_img])
@property
def scale_zoom_out_outer_pixels(self):
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
return outer_pixels
@property
def scale_zoom_out_inner_pixels(self):
return [1, 1, 2, 2], [1, 2, 1, 2]
@property
def scale_zoom_out_kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),
ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)]
return [ia.KeypointsOnImage(kps,
shape=self.scale_zoom_out_base_img.shape)]
@property
def scale_zoom_out_kpsoi_aug(self):
kps_aug = [ia.Keypoint(x=0.765, y=0.765),
ia.Keypoint(x=2.235, y=0.765),
ia.Keypoint(x=0.765, y=2.235),
ia.Keypoint(x=2.235, y=2.235)]
return [ia.KeypointsOnImage(kps_aug,
shape=self.scale_zoom_out_base_img.shape)]
def test_image_scale_zoom_out(self):
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images(self.scale_zoom_out_images)
outer_pixels = self.scale_zoom_out_outer_pixels
inner_pixels = self.scale_zoom_out_inner_pixels
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
def test_image_scale_zoom_out__deterministic(self):
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.scale_zoom_out_images)
outer_pixels = self.scale_zoom_out_outer_pixels
inner_pixels = self.scale_zoom_out_inner_pixels
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
def test_image_scale_zoom_out__list(self):
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images([self.scale_zoom_out_base_img])
outer_pixels = self.scale_zoom_out_outer_pixels
inner_pixels = self.scale_zoom_out_inner_pixels
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
def test_image_scale_zoom_out__list_and_deterministic(self):
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.scale_zoom_out_base_img])
outer_pixels = self.scale_zoom_out_outer_pixels
inner_pixels = self.scale_zoom_out_inner_pixels
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
def test_keypoints_scale_zoom_out(self):
self._test_cba_scale(
"augment_keypoints", 0.49, self.kpsoi,
self.kpsoi_scaled(0.49, 0.49), False)
def test_keypoints_scale_zoom_out__deterministic(self):
self._test_cba_scale(
"augment_keypoints", 0.49, self.kpsoi,
self.kpsoi_scaled(0.49, 0.49), True)
def test_polygons_scale_zoom_out(self):
self._test_cba_scale(
"augment_polygons", 0.49, self.psoi,
self.psoi_scaled(0.49, 0.49), False)
def test_polygons_scale_zoom_out__deterministic(self):
self._test_cba_scale(
"augment_polygons", 0.49, self.psoi,
self.psoi_scaled(0.49, 0.49), True)
def test_line_strings_scale_zoom_out(self):
self._test_cba_scale(
"augment_line_strings", 0.49, self.lsoi,
self.lsoi_scaled(0.49, 0.49), False)
def test_line_strings_scale_zoom_out__deterministic(self):
self._test_cba_scale(
"augment_line_strings", 0.49, self.lsoi,
self.lsoi_scaled(0.49, 0.49), True)
def test_bounding_boxes_scale_zoom_out(self):
self._test_cba_scale(
"augment_bounding_boxes", 0.49, self.bbsoi,
self.bbsoi_scaled(0.49, 0.49), False)
def test_bounding_boxes_scale_zoom_out__deterministic(self):
self._test_cba_scale(
"augment_bounding_boxes", 0.49, self.bbsoi,
self.bbsoi_scaled(0.49, 0.49), True)
# ---------------------
# scale: x and y axis are both tuples
# ---------------------
def test_image_x_and_y_axis_are_tuples(self):
aug = iaa.Affine(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
translate_px=0, rotate=0, shear=0)
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images = np.array([image])
last_aug = None
nb_changed_aug = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
if i == 0:
last_aug = observed_aug
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
last_aug = observed_aug
assert nb_changed_aug >= int(nb_iterations * 0.8)
def test_image_x_and_y_axis_are_tuples__deterministic(self):
aug = iaa.Affine(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images = np.array([image])
last_aug_det = None
nb_changed_aug_det = 0
nb_iterations = 10
for i in sm.xrange(nb_iterations):
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug_det = observed_aug_det
assert nb_changed_aug_det == 0
# ------------
# alignment
# TODO add alignment tests for: BBs, Polys, LS
# ------------
def test_keypoint_alignment(self):
image = np.zeros((100, 100), dtype=np.uint8)
image[40-1:40+2, 40-1:40+2] = 255
image[40-1:40+2, 60-1:60+2] = 255
kps = [ia.Keypoint(x=40, y=40), ia.Keypoint(x=60, y=40)]
kpsoi = ia.KeypointsOnImage(kps, shape=image.shape)
images = [image, image, image]
kpsois = [kpsoi.deepcopy(),
ia.KeypointsOnImage([], shape=image.shape),
kpsoi.deepcopy()]
aug = iaa.Affine(scale=[0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5,
1.6, 1.7],
order=0)
for iter in sm.xrange(40):
images_aug, kpsois_aug = aug(images=images, keypoints=kpsois)
assert kpsois_aug[1].empty
for i in [0, 2]:
image_aug = images_aug[i]
kpsoi_aug = kpsois_aug[i]
for kp in kpsoi_aug.keypoints:
value = image_aug[int(kp.y), int(kp.x)]
assert value > 200
# ------------
# make sure that polygons stay valid upon extreme scaling
# ------------
def test_polygons_stay_valid_when_using_extreme_scalings(self):
scales = [1e-4, 1e-2, 1e2, 1e4]
backends = ["auto", "cv2", "skimage"]
orders = [0, 1, 3]
gen = itertools.product(scales, backends, orders)
for scale, backend, order in gen:
with self.subTest(scale=scale, backend=backend, order=order):
aug = iaa.Affine(scale=scale, order=order)
psoi = ia.PolygonsOnImage([
ia.Polygon([(0, 0), (10, 0), (5, 5)])],
shape=(10, 10))
psoi_aug = aug.augment_polygons(psoi)
poly = psoi_aug.polygons[0]
ext = poly.exterior
assert poly.is_valid
assert ext[0][0] < ext[2][0] < ext[1][0]
assert ext[0][1] < ext[2][1]
assert np.allclose(ext[0][1], ext[1][1])
class TestAffine_translate(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
return np.uint8([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
])[:, :, np.newaxis]
@property
def image_1px_right(self):
return np.uint8([
[0, 0, 0],
[0, 0, 1],
[0, 0, 0]
])[:, :, np.newaxis]
@property
def image_1px_bottom(self):
return np.uint8([
[0, 0, 0],
[0, 0, 0],
[0, 1, 0]
])[:, :, np.newaxis]
@property
def images(self):
return np.array([self.image])
@property
def images_1px_right(self):
return np.array([self.image_1px_right])
@property
def images_1px_bottom(self):
return np.array([self.image_1px_bottom])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=1, y=1)]
return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
@property
def kpsoi_1px_right(self):
kps = [ia.Keypoint(x=2, y=1)]
return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
@property
def kpsoi_1px_bottom(self):
kps = [ia.Keypoint(x=1, y=2)]
return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
@property
def psoi(self):
polys = [ia.Polygon([(0, 0), (2, 0), (2, 2)])]
return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
@property
def psoi_1px_right(self):
polys = [ia.Polygon([(0+1, 0), (2+1, 0), (2+1, 2)])]
return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
@property
def psoi_1px_bottom(self):
polys = [ia.Polygon([(0, 0+1), (2, 0+1), (2, 2+1)])]
return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
@property
def lsoi(self):
ls = [ia.LineString([(0, 0), (2, 0), (2, 2)])]
return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
@property
def lsoi_1px_right(self):
ls = [ia.LineString([(0+1, 0), (2+1, 0), (2+1, 2)])]
return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
@property
def lsoi_1px_bottom(self):
ls = [ia.LineString([(0, 0+1), (2, 0+1), (2, 2+1)])]
return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
@property
def bbsoi(self):
bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
@property
def bbsoi_1px_right(self):
bbs = [ia.BoundingBox(x1=0+1, y1=1, x2=2+1, y2=3)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
@property
def bbsoi_1px_bottom(self):
bbs = [ia.BoundingBox(x1=0, y1=1+1, x2=2, y2=3+1)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
# ---------------------
# translate: move one pixel to the right
# ---------------------
def test_image_translate_1px_right(self):
# move one pixel to the right
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right__list(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0)
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_right])
def test_image_translate_1px_right__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_right])
def test_keypoints_translate_1px_right(self):
self._test_cba_translate_px(
"augment_keypoints", {"x": 1, "y": 0},
self.kpsoi, self.kpsoi_1px_right, False)
def test_keypoints_translate_1px_right__deterministic(self):
self._test_cba_translate_px(
"augment_keypoints", {"x": 1, "y": 0},
self.kpsoi, self.kpsoi_1px_right, True)
def test_polygons_translate_1px_right(self):
self._test_cba_translate_px(
"augment_polygons", {"x": 1, "y": 0},
self.psoi, self.psoi_1px_right, False)
def test_polygons_translate_1px_right__deterministic(self):
self._test_cba_translate_px(
"augment_polygons", {"x": 1, "y": 0},
self.psoi, self.psoi_1px_right, True)
def test_line_strings_translate_1px_right(self):
self._test_cba_translate_px(
"augment_line_strings", {"x": 1, "y": 0},
self.lsoi, self.lsoi_1px_right, False)
def test_line_strings_translate_1px_right__deterministic(self):
self._test_cba_translate_px(
"augment_line_strings", {"x": 1, "y": 0},
self.lsoi, self.lsoi_1px_right, True)
def test_bounding_boxes_translate_1px_right(self):
self._test_cba_translate_px(
"augment_bounding_boxes", {"x": 1, "y": 0},
self.bbsoi, self.bbsoi_1px_right, False)
def test_bounding_boxes_translate_1px_right__deterministic(self):
self._test_cba_translate_px(
"augment_bounding_boxes", {"x": 1, "y": 0},
self.bbsoi, self.bbsoi_1px_right, True)
@classmethod
def _test_cba_translate_px(cls, augf_name, px, cbaoi, cbaoi_translated,
deterministic):
aug = iaa.Affine(scale=1.0, translate_px=px, rotate=0, shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_translated)
def test_image_translate_1px_right_skimage(self):
# move one pixel to the right
# with backend = skimage
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0, backend="skimage")
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right_skimage_order_all(self):
# move one pixel to the right
# with backend = skimage, order=ALL
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0, backend="skimage", order=ia.ALL)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right_skimage_order_is_list(self):
# move one pixel to the right
# with backend = skimage, order=list
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0, backend="skimage", order=[0, 1, 3])
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right_cv2_order_is_list(self):
# move one pixel to the right
# with backend = cv2, order=list
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0, backend="cv2", order=[0, 1, 3])
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_1px_right_cv2_order_is_stoch_param(self):
# move one pixel to the right
# with backend = cv2, order=StochasticParameter
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
shear=0, backend="cv2", order=iap.Choice([0, 1, 3]))
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
# ---------------------
# translate: move one pixel to the bottom
# ---------------------
def test_image_translate_1px_bottom(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
shear=0)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_bottom)
def test_image_translate_1px_bottom__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_bottom)
def test_image_translate_1px_bottom__list(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
shear=0)
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_bottom])
def test_image_translate_1px_bottom__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_bottom])
def test_keypoints_translate_1px_bottom(self):
self._test_cba_translate_px(
"augment_keypoints", {"x": 0, "y": 1},
self.kpsoi, self.kpsoi_1px_bottom, False)
def test_keypoints_translate_1px_bottom__deterministic(self):
self._test_cba_translate_px(
"augment_keypoints", {"x": 0, "y": 1},
self.kpsoi, self.kpsoi_1px_bottom, True)
def test_polygons_translate_1px_bottom(self):
self._test_cba_translate_px(
"augment_polygons", {"x": 0, "y": 1},
self.psoi, self.psoi_1px_bottom, False)
def test_polygons_translate_1px_bottom__deterministic(self):
self._test_cba_translate_px(
"augment_polygons", {"x": 0, "y": 1},
self.psoi, self.psoi_1px_bottom, True)
def test_line_strings_translate_1px_bottom(self):
self._test_cba_translate_px(
"augment_line_strings", {"x": 0, "y": 1},
self.lsoi, self.lsoi_1px_bottom, False)
def test_line_strings_translate_1px_bottom__deterministic(self):
self._test_cba_translate_px(
"augment_line_strings", {"x": 0, "y": 1},
self.lsoi, self.lsoi_1px_bottom, True)
def test_bounding_boxes_translate_1px_bottom(self):
self._test_cba_translate_px(
"augment_bounding_boxes", {"x": 0, "y": 1},
self.bbsoi, self.bbsoi_1px_bottom, False)
def test_bounding_boxes_translate_1px_bottom__deterministic(self):
self._test_cba_translate_px(
"augment_bounding_boxes", {"x": 0, "y": 1},
self.bbsoi, self.bbsoi_1px_bottom, True)
# ---------------------
# translate: fraction of the image size (towards the right)
# ---------------------
def test_image_translate_33percent_right(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
rotate=0, shear=0)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_33percent_right__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_right)
def test_image_translate_33percent_right__list(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
rotate=0, shear=0)
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_right])
def test_image_translate_33percent_right__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_right])
def test_keypoints_translate_33percent_right(self):
self._test_cba_translate_percent(
"augment_keypoints", {"x": 0.3333, "y": 0},
self.kpsoi, self.kpsoi_1px_right, False)
def test_keypoints_translate_33percent_right__deterministic(self):
self._test_cba_translate_percent(
"augment_keypoints", {"x": 0.3333, "y": 0},
self.kpsoi, self.kpsoi_1px_right, True)
def test_polygons_translate_33percent_right(self):
self._test_cba_translate_percent(
"augment_polygons", {"x": 0.3333, "y": 0},
self.psoi, self.psoi_1px_right, False)
def test_polygons_translate_33percent_right__deterministic(self):
self._test_cba_translate_percent(
"augment_polygons", {"x": 0.3333, "y": 0},
self.psoi, self.psoi_1px_right, True)
def test_line_strings_translate_33percent_right(self):
self._test_cba_translate_percent(
"augment_line_strings", {"x": 0.3333, "y": 0},
self.lsoi, self.lsoi_1px_right, False)
def test_line_strings_translate_33percent_right__deterministic(self):
self._test_cba_translate_percent(
"augment_line_strings", {"x": 0.3333, "y": 0},
self.lsoi, self.lsoi_1px_right, True)
def test_bounding_boxes_translate_33percent_right(self):
self._test_cba_translate_percent(
"augment_bounding_boxes", {"x": 0.3333, "y": 0},
self.bbsoi, self.bbsoi_1px_right, False)
def test_bounding_boxes_translate_33percent_right__deterministic(self):
self._test_cba_translate_percent(
"augment_bounding_boxes", {"x": 0.3333, "y": 0},
self.bbsoi, self.bbsoi_1px_right, True)
@classmethod
def _test_cba_translate_percent(cls, augf_name, percent, cbaoi,
cbaoi_translated, deterministic):
aug = iaa.Affine(scale=1.0, translate_percent=percent, rotate=0,
shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_translated)
# ---------------------
# translate: fraction of the image size (towards the bottom)
# ---------------------
def test_image_translate_33percent_bottom(self):
# move 33% (one pixel) to the bottom
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
rotate=0, shear=0)
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_bottom)
def test_image_translate_33percent_bottom__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_1px_bottom)
def test_image_translate_33percent_bottom__list(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
rotate=0, shear=0)
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_bottom])
def test_image_translate_33percent_bottom__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert array_equal_lists(observed, [self.image_1px_bottom])
def test_keypoints_translate_33percent_bottom(self):
self._test_cba_translate_percent(
"augment_keypoints", {"x": 0, "y": 0.3333},
self.kpsoi, self.kpsoi_1px_bottom, False)
def test_keypoints_translate_33percent_bottom__deterministic(self):
self._test_cba_translate_percent(
"augment_keypoints", {"x": 0, "y": 0.3333},
self.kpsoi, self.kpsoi_1px_bottom, True)
def test_polygons_translate_33percent_bottom(self):
self._test_cba_translate_percent(
"augment_polygons", {"x": 0, "y": 0.3333},
self.psoi, self.psoi_1px_bottom, False)
def test_polygons_translate_33percent_bottom__deterministic(self):
self._test_cba_translate_percent(
"augment_polygons", {"x": 0, "y": 0.3333},
self.psoi, self.psoi_1px_bottom, True)
def test_line_strings_translate_33percent_bottom(self):
self._test_cba_translate_percent(
"augment_line_strings", {"x": 0, "y": 0.3333},
self.lsoi, self.lsoi_1px_bottom, False)
def test_line_strings_translate_33percent_bottom__deterministic(self):
self._test_cba_translate_percent(
"augment_line_strings", {"x": 0, "y": 0.3333},
self.lsoi, self.lsoi_1px_bottom, True)
def test_bounding_boxes_translate_33percent_bottom(self):
self._test_cba_translate_percent(
"augment_bounding_boxes", {"x": 0, "y": 0.3333},
self.bbsoi, self.bbsoi_1px_bottom, False)
def test_bounding_boxes_translate_33percent_bottom__deterministic(self):
self._test_cba_translate_percent(
"augment_bounding_boxes", {"x": 0, "y": 0.3333},
self.bbsoi, self.bbsoi_1px_bottom, True)
# ---------------------
# translate: axiswise uniform distributions
# ---------------------
def test_image_translate_by_axiswise_uniform_distributions(self):
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)},
rotate=0, shear=0)
last_aug = None
nb_changed_aug = 0
nb_iterations = 1000
centers_aug = self.image.astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(self.images)
if i == 0:
last_aug = observed_aug
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
last_aug = observed_aug
assert len(observed_aug[0].nonzero()[0]) == 1
centers_aug += (observed_aug[0] > 0)
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
def test_image_translate_by_axiswise_uniform_distributions__det(self):
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
last_aug_det = None
nb_changed_aug_det = 0
nb_iterations = 10
centers_aug_det = self.image.astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug_det = aug_det.augment_images(self.images)
if i == 0:
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug_det = observed_aug_det
assert len(observed_aug_det[0].nonzero()[0]) == 1
centers_aug_det += (observed_aug_det[0] > 0)
assert nb_changed_aug_det == 0
# ---------------------
# translate heatmaps
# ---------------------
@property
def heatmaps(self):
return ia.HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
@property
def heatmaps_1px_right(self):
return ia.HeatmapsOnImage(
np.float32([
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
def test_heatmaps_translate_1px_right(self):
aug = iaa.Affine(translate_px={"x": 1})
observed = aug.augment_heatmaps([self.heatmaps])[0]
_assert_same_shape(observed, self.heatmaps)
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed.get_arr(),
self.heatmaps_1px_right.get_arr())
def test_heatmaps_translate_1px_right_should_ignore_cval(self):
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.Affine(translate_px={"x": 1}, cval=255)
observed = aug.augment_heatmaps([self.heatmaps])[0]
_assert_same_shape(observed, self.heatmaps)
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed.get_arr(),
self.heatmaps_1px_right.get_arr())
def test_heatmaps_translate_1px_right_should_ignore_mode(self):
aug = iaa.Affine(translate_px={"x": 1}, mode="edge", cval=255)
observed = aug.augment_heatmaps([self.heatmaps])[0]
_assert_same_shape(observed, self.heatmaps)
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed.get_arr(),
self.heatmaps_1px_right.get_arr())
# ---------------------
# translate segmaps
# ---------------------
@property
def segmaps(self):
return SegmentationMapsOnImage(
np.int32([
[0, 1, 2],
[0, 1, 2],
[2, 2, 2],
]),
shape=(3, 3, 3)
)
@property
def segmaps_1px_right(self):
return SegmentationMapsOnImage(
np.int32([
[0, 0, 1],
[0, 0, 1],
[0, 2, 2],
]),
shape=(3, 3, 3)
)
def test_segmaps_translate_1px_right(self):
aug = iaa.Affine(translate_px={"x": 1})
observed = aug.augment_segmentation_maps([self.segmaps])[0]
_assert_same_shape(observed, self.segmaps)
assert np.array_equal(observed.get_arr(),
self.segmaps_1px_right.get_arr())
def test_segmaps_translate_1px_right_should_ignore_cval(self):
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.Affine(translate_px={"x": 1}, cval=255)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
_assert_same_shape(observed, self.segmaps)
assert np.array_equal(observed.get_arr(),
self.segmaps_1px_right.get_arr())
def test_segmaps_translate_1px_right_should_ignore_mode(self):
aug = iaa.Affine(translate_px={"x": 1}, mode="edge", cval=255)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
_assert_same_shape(observed, self.segmaps)
assert np.array_equal(observed.get_arr(),
self.segmaps_1px_right.get_arr())
class TestAffine_rotate(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
return np.uint8([
[0, 0, 0],
[255, 255, 255],
[0, 0, 0]
])[:, :, np.newaxis]
@property
def image_rot90(self):
return np.uint8([
[0, 255, 0],
[0, 255, 0],
[0, 255, 0]
])[:, :, np.newaxis]
@property
def images(self):
return np.array([self.image])
@property
def images_rot90(self):
return np.array([self.image_rot90])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)]
return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
@property
def kpsoi_rot90(self):
kps = [ia.Keypoint(x=3-1, y=0), ia.Keypoint(x=3-1, y=1),
ia.Keypoint(x=3-1, y=2)]
return [ia.KeypointsOnImage(kps, shape=self.image_rot90.shape)]
@property
def psoi(self):
polys = [ia.Polygon([(0, 0), (3, 0), (3, 3)])]
return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
@property
def psoi_rot90(self):
polys = [ia.Polygon([(3-0, 0), (3-0, 3), (3-3, 3)])]
return [ia.PolygonsOnImage(polys, shape=self.image_rot90.shape)]
@property
def lsoi(self):
ls = [ia.LineString([(0, 0), (3, 0), (3, 3)])]
return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
@property
def lsoi_rot90(self):
ls = [ia.LineString([(3-0, 0), (3-0, 3), (3-3, 3)])]
return [ia.LineStringsOnImage(ls, shape=self.image_rot90.shape)]
@property
def bbsoi(self):
bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
@property
def bbsoi_rot90(self):
bbs = [ia.BoundingBox(x1=0, y1=0, x2=2, y2=2)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.image_rot90.shape)]
def test_image_rot90(self):
# rotate by 90 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
observed = aug.augment_images(self.images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, self.images_rot90)
def test_image_rot90__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, self.images_rot90)
def test_image_rot90__list(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
observed = aug.augment_images([self.image])
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, [self.image_rot90])
def test_image_rot90__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, [self.image_rot90])
def test_keypoints_rot90(self):
self._test_cba_rotate(
"augment_keypoints", 90, self.kpsoi, self.kpsoi_rot90, False)
def test_keypoints_rot90__deterministic(self):
self._test_cba_rotate(
"augment_keypoints", 90, self.kpsoi, self.kpsoi_rot90, True)
def test_polygons_rot90(self):
self._test_cba_rotate(
"augment_polygons", 90, self.psoi, self.psoi_rot90, False)
def test_polygons_rot90__deterministic(self):
self._test_cba_rotate(
"augment_polygons", 90, self.psoi, self.psoi_rot90, True)
def test_line_strings_rot90(self):
self._test_cba_rotate(
"augment_line_strings", 90, self.lsoi, self.lsoi_rot90, False)
def test_line_strings_rot90__deterministic(self):
self._test_cba_rotate(
"augment_line_strings", 90, self.lsoi, self.lsoi_rot90, True)
def test_bounding_boxes_rot90(self):
self._test_cba_rotate(
"augment_bounding_boxes", 90, self.bbsoi, self.bbsoi_rot90, False)
def test_bounding_boxes_rot90__deterministic(self):
self._test_cba_rotate(
"augment_bounding_boxes", 90, self.bbsoi, self.bbsoi_rot90, True)
@classmethod
def _test_cba_rotate(cls, augf_name, rotate, cbaoi,
cbaoi_rotated, deterministic):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=rotate,
shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_rotated)
def test_image_rotate_is_tuple_0_to_364_deg(self):
# random rotation 0-364 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
last_aug = None
nb_changed_aug = 0
nb_iterations = 1000
pixels_sums_aug = self.image.astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(self.images)
if i == 0:
last_aug = observed_aug
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
last_aug = observed_aug
pixels_sums_aug += (observed_aug[0] > 100)
assert nb_changed_aug >= int(nb_iterations * 0.9)
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
# outer pixels, should sometimes be white
# the values here had to be set quite tolerant, the middle pixels at
# top/left/bottom/right get more activation than expected
outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2],
[0, 1, 2, 0, 2, 0, 1, 2])
assert (
pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))
).all()
assert (
pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))
).all()
def test_image_rotate_is_tuple_0_to_364_deg__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
aug_det = aug.to_deterministic()
last_aug_det = None
nb_changed_aug_det = 0
nb_iterations = 10
pixels_sums_aug_det = self.image.astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug_det = aug_det.augment_images(self.images)
if i == 0:
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug_det = observed_aug_det
pixels_sums_aug_det += (observed_aug_det[0] > 100)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug_det[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug_det[1, 1] < (nb_iterations * 1.02)
def test_alignment_between_images_and_heatmaps_for_fixed_rot(self):
# measure alignment between images and heatmaps when rotating
for backend in ["auto", "cv2", "skimage"]:
aug = iaa.Affine(rotate=45, backend=backend)
image = np.zeros((7, 6), dtype=np.uint8)
image[:, 2:3+1] = 255
hm = ia.HeatmapsOnImage(image.astype(np.float32)/255, shape=(7, 6))
img_aug = aug.augment_image(image)
hm_aug = aug.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (7, 6)
assert hm_aug.arr_0to1.shape == (7, 6, 1)
assert (same / img_aug_mask.size) >= 0.95
def test_alignment_between_images_and_smaller_heatmaps_for_fixed_rot(self):
# measure alignment between images and heatmaps when rotating
# here with smaller heatmaps
for backend in ["auto", "cv2", "skimage"]:
aug = iaa.Affine(rotate=45, backend=backend)
image = np.zeros((56, 48), dtype=np.uint8)
image[:, 16:24+1] = 255
hm = ia.HeatmapsOnImage(
ia.imresize_single_image(
image, (28, 24), interpolation="cubic"
).astype(np.float32)/255,
shape=(56, 48)
)
img_aug = aug.augment_image(image)
hm_aug = aug.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, img_aug.shape[0:2], interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (56, 48)
assert hm_aug.arr_0to1.shape == (28, 24, 1)
assert (same / img_aug_mask.size) >= 0.9
class TestAffine_cval(unittest.TestCase):
@property
def image(self):
return np.ones((3, 3, 1), dtype=np.uint8) * 255
@property
def images(self):
return np.array([self.image])
def test_image_fixed_cval(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=128)
observed = aug.augment_images(self.images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
def test_image_fixed_cval__deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=128)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
def test_image_fixed_cval__list(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=128)
observed = aug.augment_images([self.image])
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
def test_image_fixed_cval__list_and_deterministic(self):
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=128)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
def test_image_cval_is_tuple(self):
# random cvals
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=(0, 255))
last_aug = None
nb_changed_aug = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(self.images)
if i == 0:
last_aug = observed_aug
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
last_aug = observed_aug
assert nb_changed_aug >= int(nb_iterations * 0.9)
def test_image_cval_is_tuple__deterministic(self):
# random cvals
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=(0, 255))
aug_det = aug.to_deterministic()
last_aug_det = None
nb_changed_aug_det = 0
nb_iterations = 10
for i in sm.xrange(nb_iterations):
observed_aug_det = aug_det.augment_images(self.images)
if i == 0:
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug_det = observed_aug_det
assert nb_changed_aug_det == 0
class TestAffine_fit_output(unittest.TestCase):
@property
def image(self):
return np.ones((3, 3, 1), dtype=np.uint8) * 255
@property
def images(self):
return np.array([self.image])
@property
def heatmaps(self):
return ia.HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)]
return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
def test_image_translate(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(translate_px=100, fit_output=True,
backend=backend)
observed = aug.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_keypoints_translate(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(translate_px=100, fit_output=True,
backend=backend)
observed = aug.augment_keypoints(self.kpsoi)
expected = self.kpsoi
assert keypoints_equal(observed, expected)
def test_heatmaps_translate(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(translate_px=100, fit_output=True,
backend=backend)
observed = aug.augment_heatmaps([self.heatmaps])[0]
expected = self.heatmaps
assert np.allclose(observed.arr_0to1, expected.arr_0to1)
def test_image_rot45(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=45, fit_output=True,
backend=backend)
img = np.zeros((10, 10), dtype=np.uint8)
img[0:2, 0:2] = 255
img[-2:, 0:2] = 255
img[0:2, -2:] = 255
img[-2:, -2:] = 255
img_aug = aug.augment_image(img)
_labels, nb_labels = skimage.morphology.label(
img_aug > 240, return_num=True, connectivity=2)
assert nb_labels == 4
def test_heatmaps_rot45(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=45, fit_output=True,
backend=backend)
img = np.zeros((10, 10), dtype=np.uint8)
img[0:2, 0:2] = 255
img[-2:, 0:2] = 255
img[0:2, -2:] = 255
img[-2:, -2:] = 255
hm = ia.HeatmapsOnImage(img.astype(np.float32)/255,
shape=(10, 10))
hm_aug = aug.augment_heatmaps([hm])[0]
_labels, nb_labels = skimage.morphology.label(
hm_aug.arr_0to1 > 240/255, return_num=True, connectivity=2)
assert nb_labels == 4
def test_heatmaps_rot45__heatmaps_smaller_than_image(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=45, fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
hm = HeatmapsOnImage(
ia.imresize_single_image(
img, (40, 40), interpolation="cubic"
).astype(np.float32)/255,
shape=(80, 80)
)
hm_aug = aug.augment_heatmaps([hm])[0]
# these asserts are deactivated because the image size can
# change under fit_output=True
# assert hm_aug.shape == (80, 80)
# assert hm_aug.arr_0to1.shape == (40, 40, 1)
_labels, nb_labels = skimage.morphology.label(
hm_aug.arr_0to1 > 200/255, return_num=True, connectivity=2)
assert nb_labels == 4
def test_image_heatmap_alignment_random_rots(self):
nb_iterations = 50
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
for _ in sm.xrange(nb_iterations):
aug = iaa.Affine(rotate=(0, 364), fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
hm = HeatmapsOnImage(
img.astype(np.float32)/255,
shape=(80, 80)
)
img_aug = aug.augment_image(img)
hm_aug = aug.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, img_aug.shape[0:2],
interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.95
def test_image_heatmap_alignment_random_rots__hms_smaller_than_img(self):
nb_iterations = 50
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
for _ in sm.xrange(nb_iterations):
aug = iaa.Affine(rotate=(0, 364), fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
hm = HeatmapsOnImage(
ia.imresize_single_image(
img, (40, 40), interpolation="cubic"
).astype(np.float32)/255,
shape=(80, 80)
)
img_aug = aug.augment_image(img)
hm_aug = aug.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, img_aug.shape[0:2],
interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.95
def test_segmaps_rot45(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=45, fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
segmap = SegmentationMapsOnImage(
(img > 100).astype(np.int32),
shape=(80, 80)
)
segmap_aug = aug.augment_segmentation_maps([segmap])[0]
# these asserts are deactivated because the image size can
# change under fit_output=True
# assert segmap_aug.shape == (80, 80)
# assert segmap_aug.arr_0to1.shape == (40, 40, 1)
_labels, nb_labels = skimage.morphology.label(
segmap_aug.arr > 0, return_num=True, connectivity=2)
assert nb_labels == 4
def test_segmaps_rot45__segmaps_smaller_than_img(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=45, fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
segmap = SegmentationMapsOnImage(
(
ia.imresize_single_image(
img, (40, 40), interpolation="cubic"
) > 100
).astype(np.int32),
shape=(80, 80)
)
segmap_aug = aug.augment_segmentation_maps([segmap])[0]
# these asserts are deactivated because the image size can
# change under fit_output=True
# assert segmap_aug.shape == (80, 80)
# assert segmap_aug.arr_0to1.shape == (40, 40, 1)
_labels, nb_labels = skimage.morphology.label(
segmap_aug.arr > 0, return_num=True, connectivity=2)
assert nb_labels == 4
def test_image_segmap_alignment_random_rots(self):
nb_iterations = 50
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
for _ in sm.xrange(nb_iterations):
aug = iaa.Affine(rotate=(0, 364), fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
segmap = SegmentationMapsOnImage(
(img > 100).astype(np.int32),
shape=(80, 80)
)
img_aug = aug.augment_image(img)
segmap_aug = aug.augment_segmentation_maps([segmap])[0]
img_aug_mask = img_aug > 100
segmap_aug_mask = ia.imresize_single_image(
segmap_aug.arr,
img_aug.shape[0:2],
interpolation="nearest"
) > 0
same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.95
def test_image_segmap_alignment_random_rots__sms_smaller_than_img(self):
nb_iterations = 50
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
for _ in sm.xrange(nb_iterations):
aug = iaa.Affine(rotate=(0, 364), fit_output=True,
backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
segmap = SegmentationMapsOnImage(
(
ia.imresize_single_image(
img, (40, 40), interpolation="cubic"
) > 100
).astype(np.int32),
shape=(80, 80)
)
img_aug = aug.augment_image(img)
segmap_aug = aug.augment_segmentation_maps([segmap])[0]
img_aug_mask = img_aug > 100
segmap_aug_mask = ia.imresize_single_image(
segmap_aug.arr,
img_aug.shape[0:2],
interpolation="nearest"
) > 0
same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.95
def test_keypoints_rot90_without_fit_output(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=90, backend=backend)
kps = ia.KeypointsOnImage([ia.Keypoint(10, 10)],
shape=(100, 200, 3))
kps_aug = aug.augment_keypoints(kps)
assert kps_aug.shape == (100, 200, 3)
assert not np.allclose(
[kps_aug.keypoints[0].x, kps_aug.keypoints[0].y],
[kps.keypoints[0].x, kps.keypoints[0].y],
atol=1e-2, rtol=0)
def test_keypoints_rot90(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
kps = ia.KeypointsOnImage([ia.Keypoint(10, 10)],
shape=(100, 200, 3))
kps_aug = aug.augment_keypoints(kps)
assert kps_aug.shape == (200, 100, 3)
assert not np.allclose(
[kps_aug.keypoints[0].x, kps_aug.keypoints[0].y],
[kps.keypoints[0].x, kps.keypoints[0].y],
atol=1e-2, rtol=0)
def test_empty_keypoints_rot90(self):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
kps = ia.KeypointsOnImage([], shape=(100, 200, 3))
kps_aug = aug.augment_keypoints(kps)
assert kps_aug.shape == (200, 100, 3)
assert len(kps_aug.keypoints) == 0
def _test_cbaoi_rot90_without_fit_output(self, cbaoi, augf_name):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
# verify that shape in PolygonsOnImages changes
aug = iaa.Affine(rotate=90, backend=backend)
cbaoi_aug = getattr(aug, augf_name)([cbaoi, cbaoi])
assert len(cbaoi_aug) == 2
for cbaoi_aug_i in cbaoi_aug:
if isinstance(cbaoi, (ia.PolygonsOnImage,
ia.LineStringsOnImage)):
assert cbaoi_aug_i.shape == cbaoi.shape
assert not cbaoi_aug_i.items[0].coords_almost_equals(
cbaoi.items[0].coords, max_distance=1e-2)
else:
assert_cbaois_equal(cbaoi_aug_i, cbaoi)
def test_polygons_rot90_without_fit_output(self):
psoi = ia.PolygonsOnImage([
ia.Polygon([(10, 10), (20, 10), (20, 20)])
], shape=(100, 200, 3))
self._test_cbaoi_rot90_without_fit_output(psoi, "augment_polygons")
def test_line_strings_rot90_without_fit_output(self):
lsoi = ia.LineStringsOnImage([
ia.LineString([(10, 10), (20, 10), (20, 20), (10, 10)])
], shape=(100, 200, 3))
self._test_cbaoi_rot90_without_fit_output(lsoi, "augment_line_strings")
def _test_cbaoi_rot90(self, cbaoi, expected, augf_name):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
cbaoi_aug = getattr(aug, augf_name)([cbaoi, cbaoi])
assert len(cbaoi_aug) == 2
for cbaoi_aug_i in cbaoi_aug:
assert_cbaois_equal(cbaoi_aug_i, expected)
def test_polygons_rot90(self):
psoi = ia.PolygonsOnImage([
ia.Polygon([(10, 10), (20, 10), (20, 20)])
], shape=(100, 200, 3))
expected = ia.PolygonsOnImage([
ia.Polygon([(100-10-1, 10), (100-10-1, 20), (100-20-1, 20)])
], shape=(200, 100, 3))
self._test_cbaoi_rot90(psoi, expected, "augment_polygons")
def test_line_strings_rot90(self):
lsoi = ia.LineStringsOnImage([
ia.LineString([(10, 10), (20, 10), (20, 20), (10, 10)])
], shape=(100, 200, 3))
expected = ia.LineStringsOnImage([
ia.LineString([(100-10-1, 10), (100-10-1, 20), (100-20-1, 20),
(100-10-1, 10)])
], shape=(200, 100, 3))
self._test_cbaoi_rot90(lsoi, expected, "augment_line_strings")
def test_bounding_boxes_rot90(self):
lsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=10, y1=10, x2=20, y2=20)
], shape=(100, 200, 3))
expected = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=100-20-1, y1=10, x2=100-10-1, y2=20)
], shape=(200, 100, 3))
self._test_cbaoi_rot90(lsoi, expected, "augment_bounding_boxes")
def _test_empty_cbaoi_rot90(self, cbaoi, expected, augf_name):
for backend in ["auto", "cv2", "skimage"]:
with self.subTest(backend=backend):
aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
cbaoi_aug = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(cbaoi_aug, expected)
def test_empty_polygons_rot90(self):
psoi = ia.PolygonsOnImage([], shape=(100, 200, 3))
expected = ia.PolygonsOnImage([], shape=(200, 100, 3))
self._test_empty_cbaoi_rot90(psoi, expected, "augment_polygons")
def test_empty_line_strings_rot90(self):
lsoi = ia.LineStringsOnImage([], shape=(100, 200, 3))
expected = ia.LineStringsOnImage([], shape=(200, 100, 3))
self._test_empty_cbaoi_rot90(lsoi, expected, "augment_line_strings")
def test_empty_bounding_boxes_rot90(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(100, 200, 3))
expected = ia.BoundingBoxesOnImage([], shape=(200, 100, 3))
self._test_empty_cbaoi_rot90(bbsoi, expected, "augment_bounding_boxes")
# TODO merge these into TestAffine_rotate since they are rotations?
# or extend to contain other affine params too?
class TestAffine_alignment(unittest.TestCase):
def setUp(self):
reseed()
def test_image_keypoint_alignment(self):
aug = iaa.Affine(rotate=[0, 180], order=0)
img = np.zeros((10, 10), dtype=np.uint8)
img[0:5, 5] = 255
img[2, 4:6] = 255
img_rot = [np.copy(img), np.copy(np.flipud(np.fliplr(img)))]
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=5, y=2)], shape=img.shape)
kpsoi_rot = [(5, 2), (5, 10-2)]
img_aug_indices = []
kpsois_aug_indices = []
for _ in sm.xrange(40):
aug_det = aug.to_deterministic()
imgs_aug = aug_det.augment_images([img, img])
kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])
assert kpsois_aug[0].shape == img.shape
assert kpsois_aug[1].shape == img.shape
for img_aug in imgs_aug:
if np.array_equal(img_aug, img_rot[0]):
img_aug_indices.append(0)
elif np.array_equal(img_aug, img_rot[1]):
img_aug_indices.append(1)
else:
assert False
for kpsoi_aug in kpsois_aug:
similar_to_rot_0 = np.allclose(
[kpsoi_aug.keypoints[0].x, kpsoi_aug.keypoints[0].y],
kpsoi_rot[0])
similar_to_rot_180 = np.allclose(
[kpsoi_aug.keypoints[0].x, kpsoi_aug.keypoints[0].y],
kpsoi_rot[1])
if similar_to_rot_0:
kpsois_aug_indices.append(0)
elif similar_to_rot_180:
kpsois_aug_indices.append(1)
else:
assert False
assert np.array_equal(img_aug_indices, kpsois_aug_indices)
assert len(set(img_aug_indices)) == 2
assert len(set(kpsois_aug_indices)) == 2
@classmethod
def _test_image_cbaoi_alignment(cls, cbaoi, cbaoi_rot, augf_name):
aug = iaa.Affine(rotate=[0, 180], order=0)
img = np.zeros((10, 10), dtype=np.uint8)
img[0:5, 5] = 255
img[2, 4:6] = 255
img_rot = [np.copy(img), np.copy(np.flipud(np.fliplr(img)))]
img_aug_indices = []
cbaois_aug_indices = []
for _ in sm.xrange(40):
aug_det = aug.to_deterministic()
imgs_aug = aug_det.augment_images([img, img])
cbaois_aug = getattr(aug_det, augf_name)([cbaoi, cbaoi])
assert cbaois_aug[0].shape == img.shape
assert cbaois_aug[1].shape == img.shape
if hasattr(cbaois_aug[0].items[0], "is_valid"):
assert cbaois_aug[0].items[0].is_valid
assert cbaois_aug[1].items[0].is_valid
for img_aug in imgs_aug:
if np.array_equal(img_aug, img_rot[0]):
img_aug_indices.append(0)
elif np.array_equal(img_aug, img_rot[1]):
img_aug_indices.append(1)
else:
assert False
for cbaoi_aug in cbaois_aug:
if cbaoi_aug.items[0].coords_almost_equals(cbaoi_rot[0]):
cbaois_aug_indices.append(0)
elif cbaoi_aug.items[0].coords_almost_equals(cbaoi_rot[1]):
cbaois_aug_indices.append(1)
else:
assert False
assert np.array_equal(img_aug_indices, cbaois_aug_indices)
assert len(set(img_aug_indices)) == 2
assert len(set(cbaois_aug_indices)) == 2
def test_image_polygon_alignment(self):
psoi = ia.PolygonsOnImage([ia.Polygon([(1, 1), (9, 1), (5, 5)])],
shape=(10, 10))
psoi_rot = [
psoi.polygons[0].deepcopy(),
ia.Polygon([(10-1, 10-1), (10-9, 10-1), (10-5, 10-5)])
]
self._test_image_cbaoi_alignment(psoi, psoi_rot,
"augment_polygons")
def test_image_line_string_alignment(self):
lsoi = ia.LineStringsOnImage([ia.LineString([(1, 1), (9, 1), (5, 5)])],
shape=(10, 10))
lsoi_rot = [
lsoi.items[0].deepcopy(),
ia.LineString([(10-1, 10-1), (10-9, 10-1), (10-5, 10-5)])
]
self._test_image_cbaoi_alignment(lsoi, lsoi_rot,
"augment_line_strings")
def test_image_bounding_box_alignment(self):
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=1, y1=1, x2=9, y2=5)], shape=(10, 10))
bbsoi_rot = [
bbsoi.items[0].deepcopy(),
ia.BoundingBox(x1=10-9, y1=10-5, x2=10-1, y2=10-1)]
self._test_image_cbaoi_alignment(bbsoi, bbsoi_rot,
"augment_bounding_boxes")
class TestAffine_other_dtypes(unittest.TestCase):
@property
def translate_mask(self):
mask = np.zeros((3, 3), dtype=bool)
mask[1, 2] = True
return mask
@property
def image(self):
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
return image
@property
def rot_mask_inner(self):
img_flipped = iaa.Fliplr(1.0)(image=self.image)
return img_flipped == 1
@property
def rot_mask_outer(self):
img_flipped = iaa.Fliplr(1.0)(image=self.image)
return img_flipped == 0
@property
def rot_thresh_inner(self):
return 0.9
@property
def rot_thresh_outer(self):
return 0.9
def rot_thresh_inner_float(self, order):
return 0.85 if order == 1 else 0.7
def rot_thresh_outer_float(self, order):
return 0.85 if order == 1 else 0.4
def test_translate_skimage_order_0_bool(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="skimage")
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == 1)
def test_translate_skimage_order_0_uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
for dtype in dtypes:
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="skimage")
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == value)
def test_translate_skimage_order_0_float(self):
# float
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="skimage")
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(_isclose(image_aug[~self.translate_mask], 0))
assert np.all(_isclose(image_aug[self.translate_mask],
np.float128(value)))
def test_rotate_skimage_order_not_0_bool(self):
# skimage, order!=0 and rotate=180
for order in [1, 3, 4, 5]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="skimage")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert (
np.sum(image_aug == image_exp)/image.size
) > self.rot_thresh_inner
def test_rotate_skimage_order_not_0_uint_int(self):
def _compute_matching(image_aug, image_exp, mask):
return np.sum(
np.isclose(image_aug[mask], image_exp[mask], rtol=0,
atol=1.001)
) / np.sum(mask)
dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
for dtype in dtypes:
for order in [1, 3, 4, 5]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="skimage")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
with self.subTest(dtype=dtype, order=order, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert _compute_matching(
image_aug, image_exp, self.rot_mask_inner
) > self.rot_thresh_inner
assert _compute_matching(
image_aug, image_exp, self.rot_mask_outer
) > self.rot_thresh_outer
def test_rotate_skimage_order_not_0_float(self):
def _compute_matching(image_aug, image_exp, mask):
return np.sum(
_isclose(image_aug[mask], image_exp[mask])
) / np.sum(mask)
for order in [1, 3, 4, 5]:
dtypes = ["float16", "float32", "float64"]
if order == 5:
# float64 caused too many interpolation inaccuracies for
# order=5, not wrong but harder to test
dtypes = ["float16", "float32"]
for dtype in dtypes:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="skimage")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
if order not in [0, 1]:
atol = 1e-2
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
if order not in [3, 4]: # results in NaNs otherwise
values = values + [min_value, max_value]
for value in values:
with self.subTest(order=order, dtype=dtype, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert _compute_matching(
image_aug, image_exp, self.rot_mask_inner
) > self.rot_thresh_inner_float(order)
assert _compute_matching(
image_aug, image_exp, self.rot_mask_outer
) > self.rot_thresh_outer_float(order)
def test_translate_cv2_order_0_bool(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="cv2")
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == 1)
def test_translate_cv2_order_0_uint_int(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="cv2")
dtypes = ["uint8", "uint16", "int8", "int16", "int32"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == value)
def test_translate_cv2_order_0_float(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="cv2")
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(_isclose(image_aug[~self.translate_mask], 0))
assert np.all(_isclose(image_aug[self.translate_mask],
np.float128(value)))
def test_rotate_cv2_order_1_and_3_bool(self):
# cv2, order=1 and rotate=180
for order in [1, 3]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="cv2")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert (np.sum(image_aug == image_exp) / image.size) > 0.9
def test_rotate_cv2_order_1_and_3_uint_int(self):
# cv2, order=1 and rotate=180
for order in [1, 3]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="cv2")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
dtypes = ["uint8", "uint16", "int8", "int16"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
with self.subTest(order=order, dtype=dtype, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert (
np.sum(image_aug == image_exp) / image.size
) > 0.9
def test_rotate_cv2_order_1_and_3_float(self):
# cv2, order=1 and rotate=180
for order in [1, 3]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="cv2")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(order=order, dtype=dtype, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert (
np.sum(_isclose(image_aug, image_exp)) / image.size
) > 0.9
class TestAffine_other(unittest.TestCase):
def test_unusual_channel_numbers(self):
nb_channels_lst = [4, 5, 512, 513]
orders = [0, 1, 3]
backends = ["auto", "skimage", "cv2"]
for nb_channels, order, backend in itertools.product(nb_channels_lst,
orders, backends):
with self.subTest(nb_channels=nb_channels, order=order,
backend=backend):
aug = iaa.Affine(translate_px={"x": -1}, mode="constant",
cval=255, order=order, backend=backend)
image = np.full((3, 3, nb_channels), 128, dtype=np.uint8)
heatmap_arr = np.full((3, 3, nb_channels), 0.5,
dtype=np.float32)
heatmap = ia.HeatmapsOnImage(heatmap_arr, shape=image.shape)
image_aug, heatmap_aug = aug(image=image, heatmaps=heatmap)
hm_aug_arr = heatmap_aug.arr_0to1
assert image_aug.shape == (3, 3, nb_channels)
assert heatmap_aug.arr_0to1.shape == (3, 3, nb_channels)
assert heatmap_aug.shape == image.shape
assert np.allclose(image_aug[:, 0:2, :], 128, rtol=0, atol=2)
assert np.allclose(image_aug[:, 2:3, 0:3], 255, rtol=0, atol=2)
assert np.allclose(image_aug[:, 2:3, 3:], 255, rtol=0, atol=2)
assert np.allclose(hm_aug_arr[:, 0:2, :], 0.5, rtol=0,
atol=0.025)
assert np.allclose(hm_aug_arr[:, 2:3, :], 0.0, rtol=0,
atol=0.025)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 1),
(1, 0, 1)
]
for fit_output in [False, True]:
for shape in shapes:
with self.subTest(shape=shape, fit_output=fit_output):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Affine(rotate=45, fit_output=fit_output)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# TODO migrate to unittest and split up tests or remove AffineCv2
def test_AffineCv2():
reseed()
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=2, y=2)]
keypoints = [ia.KeypointsOnImage(kps, shape=base_img.shape)]
# no translation/scale/rotate/shear, shouldnt change nothing
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# ---------------------
# scale
# ---------------------
# zoom in
aug = iaa.AffineCv2(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
# zoom in only on x axis
aug = iaa.AffineCv2(scale={"x": 1.75, "y": 1.0}, translate_px=0,
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
# zoom in only on y axis
aug = iaa.AffineCv2(scale={"x": 1.0, "y": 1.75}, translate_px=0,
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
# zoom out
# this one uses a 4x4 area of all 255, which is zoomed out to a 4x4 area
# in which the center 2x2 area is 255
# zoom in should probably be adapted to this style
# no separate tests here for x/y axis, should work fine if zoom in
# works with that
aug = iaa.AffineCv2(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.ones((4, 4, 1), dtype=np.uint8) * 255
images = np.array([image])
images_list = [image]
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
inner_pixels = ([1, 1, 2, 2], [1, 2, 1, 2])
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),
ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)]
keypoints = [ia.KeypointsOnImage(kps, shape=image.shape)]
kps_aug = [ia.Keypoint(x=0.765, y=0.765), ia.Keypoint(x=2.235, y=0.765),
ia.Keypoint(x=0.765, y=2.235), ia.Keypoint(x=2.235, y=2.235)]
keypoints_aug = [ia.KeypointsOnImage(kps_aug, shape=image.shape)]
observed = aug.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# varying scales
aug = iaa.AffineCv2(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images = np.array([image])
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
aug = iaa.AffineCv2(scale=iap.Uniform(0.7, 0.9))
assert isinstance(aug.scale, iap.Uniform)
assert isinstance(aug.scale.a, iap.Deterministic)
assert isinstance(aug.scale.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8
# ---------------------
# translate
# ---------------------
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=ALL
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0, order=ia.ALL)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=list
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0, order=[0, 1, 2])
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0, order=iap.Choice([0, 1, 2]))
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the bottom
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 0, "y": 1},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the right
aug = iaa.AffineCv2(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the bottom
aug = iaa.AffineCv2(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
centers_aug = np.copy(image).astype(np.int32) * 0
centers_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert len(observed_aug[0].nonzero()[0]) == 1
assert len(observed_aug_det[0].nonzero()[0]) == 1
centers_aug += (observed_aug[0] > 0)
centers_aug_det += (observed_aug_det[0] > 0)
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
aug = iaa.AffineCv2(translate_percent=iap.Uniform(0.7, 0.9))
assert isinstance(aug.translate, iap.Uniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.translate.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.translate.b.value < 0.9 + 1e-8
aug = iaa.AffineCv2(translate_px=iap.DiscreteUniform(1, 10))
assert isinstance(aug.translate, iap.DiscreteUniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert aug.translate.a.value == 1
assert aug.translate.b.value == 10
# ---------------------
# translate heatmaps
# ---------------------
heatmaps = HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
arr_expected_1px_right = np.float32([
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.75, 0.75],
])
aug = iaa.AffineCv2(translate_px={"x": 1})
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert np.isclose(observed.min_value, heatmaps.min_value, rtol=0, atol=1e-6)
assert np.isclose(observed.max_value, heatmaps.max_value, rtol=0, atol=1e-6)
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.AffineCv2(translate_px={"x": 1}, cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert np.isclose(observed.min_value, heatmaps.min_value, rtol=0, atol=1e-6)
assert np.isclose(observed.max_value, heatmaps.max_value, rtol=0, atol=1e-6)
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
aug = iaa.AffineCv2(translate_px={"x": 1}, mode="replicate", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert np.isclose(observed.min_value, heatmaps.min_value, rtol=0, atol=1e-6)
assert np.isclose(observed.max_value, heatmaps.max_value, rtol=0, atol=1e-6)
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# ---------------------
# translate segmaps
# ---------------------
segmaps = SegmentationMapsOnImage(
np.int32([
[0, 1, 2],
[0, 1, 2],
[2, 2, 2],
]),
shape=(3, 3, 3)
)
arr_expected_1px_right = np.int32([
[0, 0, 1],
[0, 0, 1],
[0, 2, 2],
])
aug = iaa.AffineCv2(translate_px={"x": 1})
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == segmaps.shape
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.AffineCv2(translate_px={"x": 1}, cval=255)
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == segmaps.shape
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
aug = iaa.AffineCv2(translate_px={"x": 1}, mode="replicate", cval=255)
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == segmaps.shape
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# ---------------------
# rotate
# ---------------------
# rotate by 45 degrees
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, :] = 255
image_aug[0, 1] = 255
image_aug[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)]
keypoints = [ia.KeypointsOnImage(kps, shape=base_img.shape)]
kps_aug = [ia.Keypoint(x=1, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=1, y=2)]
keypoints_aug = [ia.KeypointsOnImage(kps_aug, shape=base_img.shape)]
observed = aug.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# rotate by StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px=0,
rotate=iap.Uniform(10, 20), shear=0)
assert isinstance(aug.rotate, iap.Uniform)
assert isinstance(aug.rotate.a, iap.Deterministic)
assert aug.rotate.a.value == 10
assert isinstance(aug.rotate.b, iap.Deterministic)
assert aug.rotate.b.value == 20
# random rotation 0-364 degrees
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
pixels_sums_aug = np.copy(image).astype(np.int32) * 0
pixels_sums_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
pixels_sums_aug += (observed_aug[0] > 100)
pixels_sums_aug_det += (observed_aug_det[0] > 100)
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
# outer pixels, should sometimes be white
# the values here had to be set quite tolerant, the middle pixels at
# top/left/bottom/right get more activation than expected
outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2])
assert (
pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))
).all()
assert (
pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))
).all()
# ---------------------
# shear
# ---------------------
# TODO
# shear by StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0,
shear=iap.Uniform(10, 20))
assert isinstance(aug.shear, iap.Uniform)
assert isinstance(aug.shear.a, iap.Deterministic)
assert aug.shear.a.value == 10
assert isinstance(aug.shear.b, iap.Deterministic)
assert aug.shear.b.value == 20
# ---------------------
# cval
# ---------------------
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=128)
aug_det = aug.to_deterministic()
image = np.ones((3, 3, 1), dtype=np.uint8) * 255
image_aug = np.copy(image)
images = np.array([image])
images_list = [image]
observed = aug.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
# random cvals
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=(0, 255))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
averages = []
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
averages.append(int(np.average(observed_aug)))
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
assert len(set(averages)) > 200
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=ia.ALL)
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=iap.DiscreteUniform(1, 5))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 1
assert aug.cval.b.value == 5
# ------------
# mode
# ------------
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode="replicate")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "replicate"
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=["replicate", "reflect"])
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "reflect" in aug.mode.a)
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0,
mode=iap.Choice(["replicate", "reflect"]))
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "reflect" in aug.mode.a)
# ------------
# exceptions for bad inputs
# ------------
# scale
got_exception = False
try:
_ = iaa.AffineCv2(scale=False)
except Exception:
got_exception = True
assert got_exception
# translate_px
got_exception = False
try:
_ = iaa.AffineCv2(translate_px=False)
except Exception:
got_exception = True
assert got_exception
# translate_percent
got_exception = False
try:
_ = iaa.AffineCv2(translate_percent=False)
except Exception:
got_exception = True
assert got_exception
# rotate
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=False,
shear=0, cval=0)
except Exception:
got_exception = True
assert got_exception
# shear
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0,
shear=False, cval=0)
except Exception:
got_exception = True
assert got_exception
# cval
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0,
shear=0, cval=None)
except Exception:
got_exception = True
assert got_exception
# mode
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0,
shear=0, cval=0, mode=False)
except Exception:
got_exception = True
assert got_exception
# non-existent order
got_exception = False
try:
_ = iaa.AffineCv2(order=-1)
except Exception:
got_exception = True
assert got_exception
# bad order datatype
got_exception = False
try:
_ = iaa.AffineCv2(order="test")
except Exception:
got_exception = True
assert got_exception
# ----------
# get_parameters
# ----------
aug = iaa.AffineCv2(scale=1, translate_px=2, rotate=3, shear=4,
order=1, cval=0, mode="constant")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic) # scale
assert isinstance(params[1], iap.Deterministic) # translate
assert isinstance(params[2], iap.Deterministic) # rotate
assert isinstance(params[3], iap.Deterministic) # shear
assert params[0].value == 1 # scale
assert params[1].value == 2 # translate
assert params[2].value == 3 # rotate
assert params[3].value == 4 # shear
assert params[4].value == 1 # order
assert params[5].value == 0 # cval
assert params[6].value == "constant" # mode
class TestPiecewiseAffine(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
img = np.zeros((60, 80), dtype=np.uint8)
img[:, 9:11+1] = 255
img[:, 69:71+1] = 255
return img
@property
def mask(self):
return self.image > 0
@property
def heatmaps(self):
return HeatmapsOnImage((self.image / 255.0).astype(np.float32),
shape=(60, 80, 3))
@property
def segmaps(self):
return SegmentationMapsOnImage(self.mask.astype(np.int32),
shape=(60, 80, 3))
# -----
# __init__
# -----
def test___init___scale_is_list(self):
# scale as list
aug = iaa.PiecewiseAffine(scale=[0.01, 0.10], nb_rows=12, nb_cols=4)
assert isinstance(aug.scale, iap.Choice)
assert 0.01 - 1e-8 < aug.scale.a[0] < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.scale.a[1] < 0.10 + 1e-8
def test___init___scale_is_tuple(self):
# scale as tuple
aug = iaa.PiecewiseAffine(scale=(0.01, 0.10), nb_rows=12, nb_cols=4)
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8
def test___init___scale_is_stochastic_parameter(self):
# scale as StochasticParameter
aug = iaa.PiecewiseAffine(scale=iap.Uniform(0.01, 0.10), nb_rows=12,
nb_cols=4)
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8
def test___init___bad_datatype_for_scale_leads_to_failure(self):
# bad datatype for scale
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=False, nb_rows=12, nb_cols=4)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___nb_rows_is_list(self):
# rows as list
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=[4, 20], nb_cols=4)
assert isinstance(aug.nb_rows, iap.Choice)
assert aug.nb_rows.a[0] == 4
assert aug.nb_rows.a[1] == 20
def test___init___nb_rows_is_tuple(self):
# rows as tuple
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=(4, 20), nb_cols=4)
assert isinstance(aug.nb_rows, iap.DiscreteUniform)
assert isinstance(aug.nb_rows.a, iap.Deterministic)
assert isinstance(aug.nb_rows.b, iap.Deterministic)
assert aug.nb_rows.a.value == 4
assert aug.nb_rows.b.value == 20
def test___init___nb_rows_is_stochastic_parameter(self):
# rows as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=iap.DiscreteUniform(4, 20),
nb_cols=4)
assert isinstance(aug.nb_rows, iap.DiscreteUniform)
assert isinstance(aug.nb_rows.a, iap.Deterministic)
assert isinstance(aug.nb_rows.b, iap.Deterministic)
assert aug.nb_rows.a.value == 4
assert aug.nb_rows.b.value == 20
def test___init___bad_datatype_for_nb_rows_leads_to_failure(self):
# bad datatype for rows
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.05, nb_rows=False, nb_cols=4)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___nb_cols_is_list(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=[4, 20])
assert isinstance(aug.nb_cols, iap.Choice)
assert aug.nb_cols.a[0] == 4
assert aug.nb_cols.a[1] == 20
def test___init___nb_cols_is_tuple(self):
# cols as tuple
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=(4, 20))
assert isinstance(aug.nb_cols, iap.DiscreteUniform)
assert isinstance(aug.nb_cols.a, iap.Deterministic)
assert isinstance(aug.nb_cols.b, iap.Deterministic)
assert aug.nb_cols.a.value == 4
assert aug.nb_cols.b.value == 20
def test___init___nb_cols_is_stochastic_parameter(self):
# cols as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4,
nb_cols=iap.DiscreteUniform(4, 20))
assert isinstance(aug.nb_cols, iap.DiscreteUniform)
assert isinstance(aug.nb_cols.a, iap.Deterministic)
assert isinstance(aug.nb_cols.b, iap.Deterministic)
assert aug.nb_cols.a.value == 4
assert aug.nb_cols.b.value == 20
def test___init___bad_datatype_for_nb_cols_leads_to_failure(self):
# bad datatype for cols
got_exception = False
try:
_aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___order_is_int(self):
# single int for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=0)
assert isinstance(aug.order, iap.Deterministic)
assert aug.order.value == 0
def test___init___order_is_list(self):
# list for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=[0, 1, 3])
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3]])
def test___init___order_is_stochastic_parameter(self):
# StochasticParameter for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=iap.Choice([0, 1, 3]))
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3]])
def test___init___order_is_all(self):
# ALL for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=ia.ALL)
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3, 4, 5]])
def test___init___bad_datatype_for_order_leads_to_failure(self):
# bad datatype for order
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___cval_is_list(self):
# cval as list
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=5, nb_cols=5,
mode="constant", cval=[0, 10])
assert isinstance(aug.cval, iap.Choice)
assert aug.cval.a[0] == 0
assert aug.cval.a[1] == 10
def test___init___cval_is_tuple(self):
# cval as tuple
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="constant", cval=(0, 10))
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 10
def test___init___cval_is_stochastic_parameter(self):
# cval as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="constant",
cval=iap.DiscreteUniform(0, 10))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 10
def test___init___cval_is_all(self):
# ALL as cval
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="constant", cval=ia.ALL)
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
def test___init___bad_datatype_for_cval_leads_to_failure(self):
# bas datatype for cval
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, cval=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___mode_is_string(self):
# single string for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="nearest")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "nearest"
def test___init___mode_is_list(self):
# list for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode=["nearest", "edge", "symmetric"])
assert isinstance(aug.mode, iap.Choice)
assert all([
v in aug.mode.a for v in ["nearest", "edge", "symmetric"]
])
def test___init___mode_is_stochastic_parameter(self):
# StochasticParameter for mode
aug = iaa.PiecewiseAffine(
scale=0.1, nb_rows=8, nb_cols=8,
mode=iap.Choice(["nearest", "edge", "symmetric"]))
assert isinstance(aug.mode, iap.Choice)
assert all([
v in aug.mode.a for v in ["nearest", "edge", "symmetric"]
])
def test___init___mode_is_all(self):
# ALL for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
assert all([
v in aug.mode.a
for v
in ["constant", "edge", "symmetric", "reflect", "wrap"]
])
def test___init___bad_datatype_for_mode_leads_to_failure(self):
# bad datatype for mode
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# scale
# -----
def test_scale_is_small_image(self):
# basic test
aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
observed = aug.augment_image(self.image)
assert (
100.0
< np.average(observed[self.mask])
< np.average(self.image[self.mask])
)
assert (
100.0-75.0
> np.average(observed[~self.mask])
> np.average(self.image[~self.mask])
)
def test_scale_is_small_image_absolute_scale(self):
aug = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed = aug.augment_image(self.image)
assert (
100.0
< np.average(observed[self.mask])
< np.average(self.image[self.mask])
)
assert (
100.0-75.0
> np.average(observed[~self.mask])
> np.average(self.image[~self.mask])
)
def test_scale_is_small_heatmaps(self):
# basic test, heatmaps
aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
observed = aug.augment_heatmaps([self.heatmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
assert (
100.0/255.0
< np.average(observed_arr[self.mask])
< np.average(self.heatmaps.get_arr()[self.mask]))
assert (
(100.0-75.0)/255.0
> np.average(observed_arr[~self.mask])
> np.average(self.heatmaps.get_arr()[~self.mask]))
def test_scale_is_small_segmaps(self):
# basic test, segmaps
aug = iaa.PiecewiseAffine(scale=0.001, nb_rows=12, nb_cols=4)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
observed_arr = observed.get_arr()
# left column starts at 9-11 and right one at 69-71
# result is 9-11 (curvy, i.e. like 50% filled) and 70-71 (straight,
# i.e. 100% filled). Reason for that is unclear, maybe a scikit-image
# problem.
observed_arr_left_col = observed_arr[:, 9:11+1]
observed_arr_right_col = observed_arr[:, 69:71+1]
assert observed.shape == self.segmaps.shape
assert np.average(observed_arr_left_col == 1) > 0.5
assert np.average(observed_arr_right_col == 1) > 0.5
assert np.average(observed_arr[~self.mask] == 0) > 0.9
def test_scale_is_zero_image(self):
# scale 0
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_image(self.image)
assert np.array_equal(observed, self.image)
def test_scale_is_zero_image_absolute_scale(self):
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed = aug.augment_image(self.image)
assert np.array_equal(observed, self.image)
def test_scale_is_zero_heatmaps(self):
# scale 0, heatmaps
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_heatmaps([self.heatmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed_arr, self.heatmaps.get_arr())
def test_scale_is_zero_segmaps(self):
# scale 0, segmaps
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == self.segmaps.shape
assert np.array_equal(observed_arr, self.segmaps.get_arr())
def test_scale_is_zero_keypoints(self):
# scale 0, keypoints
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
kps = [ia.Keypoint(x=5, y=3), ia.Keypoint(x=3, y=8)]
kpsoi = ia.KeypointsOnImage(kps, shape=(14, 14, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, kpsoi)
@classmethod
def _test_scale_is_zero_cbaoi(cls, cbaoi, augf_name):
aug = iaa.PiecewiseAffine(scale=0, nb_rows=10, nb_cols=10)
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi)
def test_scale_is_zero_polygons(self):
exterior = [(10, 10),
(70, 10), (70, 20), (70, 30), (70, 40),
(70, 50), (70, 60), (70, 70), (70, 80),
(70, 90),
(10, 90),
(10, 80), (10, 70), (10, 60), (10, 50),
(10, 40), (10, 30), (10, 20), (10, 10)]
poly = ia.Polygon(exterior)
psoi = ia.PolygonsOnImage([poly, poly.shift(left=1, top=1)],
shape=(100, 80))
self._test_scale_is_zero_cbaoi(psoi, "augment_polygons")
def test_scale_is_zero_line_strings(self):
coords = [(10, 10),
(70, 10), (70, 20), (70, 30), (70, 40),
(70, 50), (70, 60), (70, 70), (70, 80),
(70, 90),
(10, 90),
(10, 80), (10, 70), (10, 60), (10, 50),
(10, 40), (10, 30), (10, 20), (10, 10)]
ls = ia.LineString(coords)
lsoi = ia.LineStringsOnImage([ls, ls.shift(left=1, top=1)],
shape=(100, 80))
self._test_scale_is_zero_cbaoi(lsoi, "augment_line_strings")
def test_scale_is_zero_bounding_boxes(self):
bb = ia.BoundingBox(x1=10, y1=10, x2=70, y2=20)
bbsoi = ia.BoundingBoxesOnImage([bb, bb.shift(left=1, top=1)],
shape=(100, 80))
self._test_scale_is_zero_cbaoi(bbsoi, "augment_bounding_boxes")
def test_scale_stronger_values_should_increase_changes_images(self):
# stronger scale should lead to stronger changes
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_image(self.image)
observed2 = aug2.augment_image(self.image)
assert (
np.average(observed1[~self.mask])
< np.average(observed2[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_images_abs(self):
aug1 = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,
absolute_scale=True)
aug2 = iaa.PiecewiseAffine(scale=10, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed1 = aug1.augment_image(self.image)
observed2 = aug2.augment_image(self.image)
assert (
np.average(observed1[~self.mask])
< np.average(observed2[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_heatmaps(self):
# stronger scale should lead to stronger changes, heatmaps
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_heatmaps([self.heatmaps])[0]
observed2 = aug2.augment_heatmaps([self.heatmaps])[0]
observed1_arr = observed1.get_arr()
observed2_arr = observed2.get_arr()
assert observed1.shape == self.heatmaps.shape
assert observed2.shape == self.heatmaps.shape
_assert_same_min_max(observed1, self.heatmaps)
_assert_same_min_max(observed2, self.heatmaps)
assert (
np.average(observed1_arr[~self.mask])
< np.average(observed2_arr[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_heatmaps_abs(self):
aug1 = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,
absolute_scale=True)
aug2 = iaa.PiecewiseAffine(scale=10, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed1 = aug1.augment_heatmaps([self.heatmaps])[0]
observed2 = aug2.augment_heatmaps([self.heatmaps])[0]
observed1_arr = observed1.get_arr()
observed2_arr = observed2.get_arr()
assert observed1.shape == self.heatmaps.shape
assert observed2.shape == self.heatmaps.shape
_assert_same_min_max(observed1, self.heatmaps)
_assert_same_min_max(observed2, self.heatmaps)
assert (
np.average(observed1_arr[~self.mask])
< np.average(observed2_arr[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_segmaps(self):
# stronger scale should lead to stronger changes, segmaps
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_segmentation_maps([self.segmaps])[0]
observed2 = aug2.augment_segmentation_maps([self.segmaps])[0]
observed1_arr = observed1.get_arr()
observed2_arr = observed2.get_arr()
assert observed1.shape == self.segmaps.shape
assert observed2.shape == self.segmaps.shape
assert (
np.average(observed1_arr[~self.mask] == 0)
> np.average(observed2_arr[~self.mask] == 0)
)
def test_scale_alignment_between_images_and_heatmaps(self):
# strong scale, measure alignment between images and heatmaps
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(self.image)
hm_aug = aug_det.augment_heatmaps([self.heatmaps])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (60, 80, 3)
_assert_same_min_max(hm_aug, self.heatmaps)
assert (same / img_aug_mask.size) >= 0.98
def test_scale_alignment_between_images_and_segmaps(self):
# strong scale, measure alignment between images and segmaps
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(self.image)
segmap_aug = aug_det.augment_segmentation_maps([self.segmaps])[0]
img_aug_mask = (img_aug > 255*0.1)
segmap_aug_mask = (segmap_aug.arr == 1)
same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])
assert segmap_aug.shape == (60, 80, 3)
assert (same / img_aug_mask.size) >= 0.9
def test_scale_alignment_between_images_and_smaller_heatmaps(self):
# strong scale, measure alignment between images and heatmaps
# heatmaps here smaller than image
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
heatmaps_small = ia.HeatmapsOnImage(
(
ia.imresize_single_image(
self.image, (30, 40+10), interpolation="cubic"
) / 255.0
).astype(np.float32),
shape=(60, 80, 3)
)
img_aug = aug_det.augment_image(self.image)
hm_aug = aug_det.augment_heatmaps([heatmaps_small])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, (60, 80), interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (60, 80, 3)
assert hm_aug.arr_0to1.shape == (30, 40+10, 1)
assert (same / img_aug_mask.size) >= 0.9 # seems to be 0.948 actually
def test_scale_alignment_between_images_and_smaller_heatmaps_abs(self):
# image is 60x80, so a scale of 8 is about 0.1*max(60,80)
aug = iaa.PiecewiseAffine(scale=8, nb_rows=12, nb_cols=4,
absolute_scale=True)
aug_det = aug.to_deterministic()
heatmaps_small = ia.HeatmapsOnImage(
(
ia.imresize_single_image(
self.image, (30, 40+10), interpolation="cubic"
) / 255.0
).astype(np.float32),
shape=(60, 80, 3)
)
img_aug = aug_det.augment_image(self.image)
hm_aug = aug_det.augment_heatmaps([heatmaps_small])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, (60, 80), interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (60, 80, 3)
assert hm_aug.arr_0to1.shape == (30, 40+10, 1)
assert (same / img_aug_mask.size) >= 0.9 # seems to be 0.930 actually
def test_scale_alignment_between_images_and_smaller_segmaps(self):
# strong scale, measure alignment between images and segmaps
# segmaps here smaller than image
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
segmaps_small = SegmentationMapsOnImage(
(
ia.imresize_single_image(
self.image, (30, 40+10), interpolation="cubic"
) > 100
).astype(np.int32),
shape=(60, 80, 3)
)
img_aug = aug_det.augment_image(self.image)
segmaps_aug = aug_det.augment_segmentation_maps([segmaps_small])[0]
img_aug_mask = img_aug > 255*0.1
segmaps_aug_mask = (
ia.imresize_single_image(
segmaps_aug.arr, (60, 80),
interpolation="nearest"
) == 1
)
same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
assert segmaps_aug.shape == (60, 80, 3)
assert segmaps_aug.arr.shape == (30, 40+10, 1)
assert (same / img_aug_mask.size) >= 0.9
def test_scale_alignment_between_images_and_keypoints(self):
# strong scale, measure alignment between images and keypoints
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
kps = [ia.Keypoint(x=5, y=15), ia.Keypoint(x=17, y=12)]
kpsoi = ia.KeypointsOnImage(kps, shape=(24, 30, 3))
img_kps = np.zeros((24, 30, 3), dtype=np.uint8)
img_kps = kpsoi.draw_on_image(img_kps, color=[255, 255, 255])
img_kps_aug = aug_det.augment_image(img_kps)
kpsoi_aug = aug_det.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (24, 30, 3)
bb1 = ia.BoundingBox(
x1=kpsoi_aug.keypoints[0].x-1, y1=kpsoi_aug.keypoints[0].y-1,
x2=kpsoi_aug.keypoints[0].x+1, y2=kpsoi_aug.keypoints[0].y+1)
bb2 = ia.BoundingBox(
x1=kpsoi_aug.keypoints[1].x-1, y1=kpsoi_aug.keypoints[1].y-1,
x2=kpsoi_aug.keypoints[1].x+1, y2=kpsoi_aug.keypoints[1].y+1)
patch1 = bb1.extract_from_image(img_kps_aug)
patch2 = bb2.extract_from_image(img_kps_aug)
assert np.max(patch1) > 150
assert np.max(patch2) > 150
assert np.average(img_kps_aug) < 40
# this test was apparently added later on (?) without noticing that
# a similar test already existed
def test_scale_alignment_between_images_and_keypoints2(self):
img = np.zeros((100, 80), dtype=np.uint8)
img[:, 9:11+1] = 255
img[:, 69:71+1] = 255
kps = [ia.Keypoint(x=10, y=20), ia.Keypoint(x=10, y=40),
ia.Keypoint(x=70, y=20), ia.Keypoint(x=70, y=40)]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
aug_det = aug.to_deterministic()
observed_img = aug_det.augment_image(img)
observed_kpsoi = aug_det.augment_keypoints([kpsoi])
assert not keypoints_equal([kpsoi], observed_kpsoi)
for kp in observed_kpsoi[0].keypoints:
assert observed_img[int(kp.y), int(kp.x)] > 0
@classmethod
def _test_scale_alignment_between_images_and_poly_or_line_strings(
cls, cba_class, cbaoi_class, augf_name):
img = np.zeros((100, 80), dtype=np.uint8)
img[:, 10-5:10+5] = 255
img[:, 70-5:70+5] = 255
coords = [(10, 10),
(70, 10), (70, 20), (70, 30), (70, 40),
(70, 50), (70, 60), (70, 70), (70, 80),
(70, 90),
(10, 90),
(10, 80), (10, 70), (10, 60), (10, 50),
(10, 40), (10, 30), (10, 20), (10, 10)]
cba = cba_class(coords)
cbaoi = cbaoi_class([cba, cba.shift(left=1, top=1)],
shape=img.shape)
aug = iaa.PiecewiseAffine(scale=0.03, nb_rows=10, nb_cols=10)
aug_det = aug.to_deterministic()
observed_imgs = aug_det.augment_images([img, img])
observed_cbaois = getattr(aug_det, augf_name)([cbaoi, cbaoi])
for observed_img, observed_cbaoi in zip(observed_imgs, observed_cbaois):
assert observed_cbaoi.shape == img.shape
for cba_aug in observed_cbaoi.items:
if hasattr(cba_aug, "is_valid"):
assert cba_aug.is_valid
for point_aug in cba_aug.coords:
x = int(np.round(point_aug[0]))
y = int(np.round(point_aug[1]))
assert observed_img[y, x] > 0
def test_scale_alignment_between_images_and_polygons(self):
self._test_scale_alignment_between_images_and_poly_or_line_strings(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_scale_alignment_between_images_and_line_strings(self):
self._test_scale_alignment_between_images_and_poly_or_line_strings(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_scale_alignment_between_images_and_bounding_boxes(self):
img = np.zeros((100, 80), dtype=np.uint8)
s = 0
img[10-s:10+s+1, 20-s:20+s+1] = 255
img[60-s:60+s+1, 70-s:70+s+1] = 255
bb = ia.BoundingBox(y1=10, x1=20, y2=60, x2=70)
bbsoi = ia.BoundingBoxesOnImage([bb], shape=img.shape)
aug = iaa.PiecewiseAffine(scale=0.03, nb_rows=10, nb_cols=10)
observed_imgs, observed_bbsois = aug(
images=[img], bounding_boxes=[bbsoi])
for observed_img, observed_bbsoi in zip(observed_imgs, observed_bbsois):
assert observed_bbsoi.shape == img.shape
observed_img_x = np.max(observed_img, axis=0)
observed_img_y = np.max(observed_img, axis=1)
nonz_x = np.nonzero(observed_img_x)[0]
nonz_y = np.nonzero(observed_img_y)[0]
img_x1 = min(nonz_x)
img_x2 = max(nonz_x)
img_y1 = min(nonz_y)
img_y2 = max(nonz_y)
expected = ia.BoundingBox(x1=img_x1, y1=img_y1,
x2=img_x2, y2=img_y2)
for bb_aug in observed_bbsoi.bounding_boxes:
# we don't expect perfect IoU here, because the actual
# underlying KP aug used distance maps
# most IoUs seem to end up in the range 0.9-0.95
assert bb_aug.iou(expected) > 0.8
def test_scale_is_list(self):
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug = iaa.PiecewiseAffine(scale=[0.01, 0.10], nb_rows=12, nb_cols=4)
avg1 = np.average([
np.average(
aug1.augment_image(self.image)
* (~self.mask).astype(np.float32)
)
for _ in sm.xrange(3)
])
avg2 = np.average([
np.average(
aug2.augment_image(self.image)
* (~self.mask).astype(np.float32)
)
for _ in sm.xrange(3)
])
seen = [0, 0]
for _ in sm.xrange(15):
observed = aug.augment_image(self.image)
avg = np.average(observed * (~self.mask).astype(np.float32))
diff1 = abs(avg - avg1)
diff2 = abs(avg - avg2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
# -----
# rows and cols
# -----
@classmethod
def _compute_observed_std_ygrad_in_mask(cls, observed, mask):
grad_vert = (
observed[1:, :].astype(np.float32)
- observed[:-1, :].astype(np.float32)
)
grad_vert = grad_vert * (~mask[1:, :]).astype(np.float32)
return np.std(grad_vert)
def _compute_std_ygrad_in_mask(self, aug, image, mask, nb_iterations):
stds = []
for _ in sm.xrange(nb_iterations):
observed = aug.augment_image(image)
stds.append(
self._compute_observed_std_ygrad_in_mask(observed, mask)
)
return np.average(stds)
def test_nb_rows_affects_images(self):
# verify effects of rows
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=30, nb_cols=4)
std1 = self._compute_std_ygrad_in_mask(aug1, self.image, self.mask, 3)
std2 = self._compute_std_ygrad_in_mask(aug2, self.image, self.mask, 3)
assert std1 < std2
def test_nb_rows_is_list_affects_images(self):
# rows as list
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=[4, 20], nb_cols=4)
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=30, nb_cols=4)
std1 = self._compute_std_ygrad_in_mask(aug1, self.image, self.mask, 3)
std2 = self._compute_std_ygrad_in_mask(aug2, self.image, self.mask, 3)
seen = [0, 0]
for _ in sm.xrange(20):
observed = aug.augment_image(self.image)
std = self._compute_observed_std_ygrad_in_mask(observed, self.mask)
diff1 = abs(std - std1)
diff2 = abs(std - std2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
def test_nb_cols_affects_images(self):
# verify effects of cols
image = self.image.T
mask = self.mask.T
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=20, nb_cols=4)
std1 = self._compute_std_ygrad_in_mask(aug1, image, mask, 3)
std2 = self._compute_std_ygrad_in_mask(aug2, image, mask, 3)
assert std1 < std2
def test_nb_cols_is_list_affects_images(self):
# cols as list
image = self.image.T
mask = self.mask.T
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=[4, 20])
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=30)
std1 = self._compute_std_ygrad_in_mask(aug1, image, mask, 3)
std2 = self._compute_std_ygrad_in_mask(aug2, image, mask, 3)
seen = [0, 0]
for _ in sm.xrange(20):
observed = aug.augment_image(image)
std = self._compute_observed_std_ygrad_in_mask(observed, mask)
diff1 = abs(std - std1)
diff2 = abs(std - std2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
# -----
# order
# -----
# TODO
# -----
# cval
# -----
def test_cval_is_zero(self):
# cval as deterministic
img = np.zeros((50, 50, 3), dtype=np.uint8) + 255
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,
mode="constant", cval=0)
observed = aug.augment_image(img)
assert np.sum([observed[:, :] == [0, 0, 0]]) > 0
def test_cval_should_be_ignored_by_heatmaps(self):
# cval as deterministic, heatmaps should always use cval=0
heatmaps = HeatmapsOnImage(
np.zeros((50, 50, 1), dtype=np.float32), shape=(50, 50, 3))
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,
mode="constant", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert np.sum([observed.get_arr()[:, :] >= 0.01]) == 0
def test_cval_should_be_ignored_by_segmaps(self):
# cval as deterministic, segmaps should always use cval=0
segmaps = SegmentationMapsOnImage(
np.zeros((50, 50, 1), dtype=np.int32), shape=(50, 50, 3))
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,
mode="constant", cval=255)
observed = aug.augment_segmentation_maps([segmaps])[0]
assert np.sum([observed.get_arr()[:, :] > 0]) == 0
def test_cval_is_list(self):
# cval as list
img = np.zeros((20, 20), dtype=np.uint8) + 255
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=5, nb_cols=5,
mode="constant", cval=[0, 10])
seen = [0, 0, 0]
for _ in sm.xrange(30):
observed = aug.augment_image(img)
nb_0 = np.sum([observed[:, :] == 0])
nb_10 = np.sum([observed[:, :] == 10])
if nb_0 > 0:
seen[0] += 1
elif nb_10 > 0:
seen[1] += 1
else:
seen[2] += 1
assert seen[0] > 5
assert seen[1] > 5
assert seen[2] <= 4
# -----
# mode
# -----
# TODO
# ---------
# remaining keypoints tests
# ---------
def test_keypoints_outside_of_image(self):
# keypoints outside of image
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
kps = [ia.Keypoint(x=-10, y=-20)]
kpsoi = ia.KeypointsOnImage(kps, shape=(10, 10, 3))
observed = aug.augment_keypoints(kpsoi)
assert_cbaois_equal(observed, kpsoi)
def test_keypoints_empty(self):
# empty keypoints
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
kpsoi = ia.KeypointsOnImage([], shape=(10, 10, 3))
observed = aug.augment_keypoints(kpsoi)
assert_cbaois_equal(observed, kpsoi)
# ---------
# remaining polygons tests
# ---------
def test_polygons_outside_of_image(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)
exterior = [(-10, -10), (110, -10), (110, 90), (-10, 90)]
poly = ia.Polygon(exterior)
psoi = ia.PolygonsOnImage([poly], shape=(10, 10, 3))
observed = aug.augment_polygons(psoi)
assert_cbaois_equal(observed, psoi)
def test_empty_polygons(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
psoi = ia.PolygonsOnImage([], shape=(10, 10, 3))
observed = aug.augment_polygons(psoi)
assert_cbaois_equal(observed, psoi)
# ---------
# remaining line string tests
# ---------
def test_line_strings_outside_of_image(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)
coords = [(-10, -10), (110, -10), (110, 90), (-10, 90)]
ls = ia.LineString(coords)
lsoi = ia.LineStringsOnImage([ls], shape=(10, 10, 3))
observed = aug.augment_line_strings(lsoi)
assert_cbaois_equal(observed, lsoi)
def test_empty_line_strings(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
lsoi = ia.LineStringsOnImage([], shape=(10, 10, 3))
observed = aug.augment_line_strings(lsoi)
assert_cbaois_equal(observed, lsoi)
# ---------
# remaining bounding box tests
# ---------
def test_bounding_boxes_outside_of_image(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)
bbs = ia.BoundingBox(x1=-10, y1=-10, x2=15, y2=15)
bbsoi = ia.BoundingBoxesOnImage([bbs], shape=(10, 10, 3))
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
def test_empty_bounding_boxes(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
bbsoi = ia.BoundingBoxesOnImage([], shape=(10, 10, 3))
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
# ---------
# zero-sized axes
# ---------
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=2, nb_cols=2)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_zero_sized_axes_absolute_scale(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PiecewiseAffine(scale=5, nb_rows=2, nb_cols=2,
absolute_scale=True)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# ---------
# other methods
# ---------
def test_get_parameters(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=10, order=1,
cval=2, mode="constant",
absolute_scale=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert isinstance(params[3], iap.Deterministic)
assert isinstance(params[4], iap.Deterministic)
assert isinstance(params[5], iap.Deterministic)
assert params[6] is False
assert 0.1 - 1e-8 < params[0].value < 0.1 + 1e-8
assert params[1].value == 8
assert params[2].value == 10
assert params[3].value == 1
assert params[4].value == 2
assert params[5].value == "constant"
# ---------
# other dtypes
# ---------
@property
def other_dtypes_mask(self):
mask = np.zeros((21, 21), dtype=bool)
mask[:, 7:13] = True
return mask
def test_other_dtypes_bool(self):
aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,
mode="constant")
image = np.zeros((21, 21), dtype=bool)
image[self.other_dtypes_mask] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert not np.all(image_aug == 1)
assert np.any(image_aug[~self.other_dtypes_mask] == 1)
def test_other_dtypes_uint_int(self):
aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,
mode="constant")
dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value-100, max_value]
values = values + [(-1)*value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value-100, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((21, 21), dtype=dtype)
image[:, 7:13] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert not np.all(image_aug == value)
assert np.any(image_aug[~self.other_dtypes_mask] == value)
def test_other_dtypes_float(self):
aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,
mode="constant")
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((21, 21), dtype=dtype)
image[:, 7:13] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
# TODO switch all other tests from float(...) to
# np.float128(...) pattern, seems to be more accurate
# for 128bit floats
assert not np.all(_isclose(image_aug, np.float128(value)))
assert np.any(_isclose(image_aug[~self.other_dtypes_mask],
np.float128(value)))
class TestPerspectiveTransform(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
img = np.zeros((30, 30), dtype=np.uint8)
img[10:20, 10:20] = 255
return img
@property
def heatmaps(self):
return HeatmapsOnImage((self.image / 255.0).astype(np.float32),
shape=self.image.shape)
@property
def segmaps(self):
return SegmentationMapsOnImage((self.image > 0).astype(np.int32),
shape=self.image.shape)
# --------
# __init__
# --------
def test___init___scale_is_tuple(self):
# tuple for scale
aug = iaa.PerspectiveTransform(scale=(0.1, 0.2))
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.1 - 1e-8 < aug.jitter.scale.a.value < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.b.value < 0.2 + 1e-8
def test___init___scale_is_list(self):
# list for scale
aug = iaa.PerspectiveTransform(scale=[0.1, 0.2, 0.3])
assert isinstance(aug.jitter.scale, iap.Choice)
assert len(aug.jitter.scale.a) == 3
assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8
assert 0.3 - 1e-8 < aug.jitter.scale.a[2] < 0.3 + 1e-8
def test___init___scale_is_stochastic_parameter(self):
# StochasticParameter for scale
aug = iaa.PerspectiveTransform(scale=iap.Choice([0.1, 0.2, 0.3]))
assert isinstance(aug.jitter.scale, iap.Choice)
assert len(aug.jitter.scale.a) == 3
assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8
assert 0.3 - 1e-8 < aug.jitter.scale.a[2] < 0.3 + 1e-8
def test___init___bad_datatype_for_scale_leads_to_failure(self):
# bad datatype for scale
got_exception = False
try:
_ = iaa.PerspectiveTransform(scale=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___mode_is_all(self):
aug = iaa.PerspectiveTransform(cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
def test___init___mode_is_string(self):
aug = iaa.PerspectiveTransform(cval=0, mode="replicate")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "replicate"
def test___init___mode_is_list(self):
aug = iaa.PerspectiveTransform(cval=0, mode=["replicate", "constant"])
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "constant" in aug.mode.a)
def test___init___mode_is_stochastic_parameter(self):
aug = iaa.PerspectiveTransform(
cval=0, mode=iap.Choice(["replicate", "constant"]))
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "constant" in aug.mode.a)
# --------
# image, heatmaps, segmaps
# --------
def test_image_without_keep_size(self):
# without keep_size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_image(self.image)
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
expected = self.image[y1:y2, x1:x2]
assert all([
abs(s1-s2) <= 1 for s1, s2 in zip(observed.shape, expected.shape)
])
if observed.shape != expected.shape:
observed = ia.imresize_single_image(
observed, expected.shape[0:2], interpolation="cubic")
# differences seem to mainly appear around the border of the inner
# rectangle, possibly due to interpolation
assert np.average(
np.abs(observed.astype(np.int32) - expected.astype(np.int32))
) < 30.0
def test_image_heatmaps_alignment_without_keep_size(self):
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
hm = HeatmapsOnImage(
self.image.astype(np.float32)/255.0,
shape=(30, 30)
)
observed = aug.augment_image(self.image)
hm_aug = aug.augment_heatmaps([hm])[0]
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
expected = (y2 - y1, x2 - x1)
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(hm_aug.shape, expected)
])
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(hm_aug.arr_0to1.shape, expected + (1,))
])
img_aug_mask = observed > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.99
def test_image_segmaps_alignment_without_keep_size(self):
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
segmaps = SegmentationMapsOnImage(
(self.image > 100).astype(np.int32),
shape=(30, 30)
)
observed = aug.augment_image(self.image)
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
expected = (y2 - y1, x2 - x1)
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(segmaps_aug.shape, expected)
])
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(segmaps_aug.arr.shape, expected + (1,))
])
img_aug_mask = observed > 255*0.5
segmaps_aug_mask = segmaps_aug.arr > 0
same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.99
def test_heatmaps_smaller_than_image_without_keep_size(self):
# without keep_size, different heatmap size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
x1_small = int(25*0.2)
x2_small = int(25*0.8)
y1_small = int(20*0.2)
y2_small = int(20*0.8)
img_small = ia.imresize_single_image(
self.image,
(20, 25),
interpolation="cubic")
hm = ia.HeatmapsOnImage(
img_small.astype(np.float32)/255.0,
shape=(30, 30))
img_aug = aug.augment_image(self.image)
hm_aug = aug.augment_heatmaps([hm])[0]
expected = (y2 - y1, x2 - x1)
expected_small = (y2_small - y1_small, x2_small - x1_small, 1)
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(hm_aug.shape, expected)
])
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(hm_aug.arr_0to1.shape, expected_small)
])
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, img_aug.shape[0:2], interpolation="cubic"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.96
def test_segmaps_smaller_than_image_without_keep_size(self):
# without keep_size, different segmap size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
x1_small = int(25*0.2)
x2_small = int(25*0.8)
y1_small = int(20*0.2)
y2_small = int(20*0.8)
img_small = ia.imresize_single_image(
self.image,
(20, 25),
interpolation="cubic")
seg = SegmentationMapsOnImage(
(img_small > 100).astype(np.int32),
shape=(30, 30))
img_aug = aug.augment_image(self.image)
seg_aug = aug.augment_segmentation_maps([seg])[0]
expected = (y2 - y1, x2 - x1)
expected_small = (y2_small - y1_small, x2_small - x1_small, 1)
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(seg_aug.shape, expected)
])
assert all([
abs(s1-s2) <= 1
for s1, s2
in zip(seg_aug.arr.shape, expected_small)
])
img_aug_mask = img_aug > 255*0.5
seg_aug_mask = ia.imresize_single_image(
seg_aug.arr, img_aug.shape[0:2], interpolation="nearest") > 0
same = np.sum(img_aug_mask == seg_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.92
def test_image_with_keep_size(self):
# with keep_size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_image(self.image)
expected = self.image[int(30*0.2):int(30*0.8),
int(30*0.2):int(30*0.8)]
expected = ia.imresize_single_image(
expected,
self.image.shape[0:2],
interpolation="cubic")
assert observed.shape == self.image.shape
# differences seem to mainly appear around the border of the inner
# rectangle, possibly due to interpolation
assert np.average(
np.abs(observed.astype(np.int32) - expected.astype(np.int32))
) < 30.0
def test_heatmaps_with_keep_size(self):
# with keep_size, heatmaps
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_heatmaps([self.heatmaps])[0]
heatmaps_arr = self.heatmaps.get_arr()
expected = heatmaps_arr[int(30*0.2):int(30*0.8),
int(30*0.2):int(30*0.8)]
expected = ia.imresize_single_image(
(expected*255).astype(np.uint8),
self.image.shape[0:2],
interpolation="cubic")
expected = (expected / 255.0).astype(np.float32)
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
# differences seem to mainly appear around the border of the inner
# rectangle, possibly due to interpolation
assert np.average(np.abs(observed.get_arr() - expected)) < 30.0
def test_segmaps_with_keep_size(self):
# with keep_size, segmaps
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
segmaps_arr = self.segmaps.get_arr()
expected = segmaps_arr[int(30*0.2):int(30*0.8),
int(30*0.2):int(30*0.8)]
expected = ia.imresize_single_image(
(expected*255).astype(np.uint8),
self.image.shape[0:2],
interpolation="cubic")
expected = (expected > 255*0.5).astype(np.int32)
assert observed.shape == self.segmaps.shape
assert np.average(observed.get_arr() != expected) < 0.05
def test_image_rgb_with_keep_size(self):
# with keep_size, RGB images
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
imgs = np.tile(self.image[np.newaxis, :, :, np.newaxis], (2, 1, 1, 3))
observed = aug.augment_images(imgs)
for img_idx in sm.xrange(2):
for c in sm.xrange(3):
observed_i = observed[img_idx, :, :, c]
expected = imgs[img_idx,
int(30*0.2):int(30*0.8),
int(30*0.2):int(30*0.8),
c]
expected = ia.imresize_single_image(
expected, imgs.shape[1:3], interpolation="cubic")
assert observed_i.shape == imgs.shape[1:3]
# differences seem to mainly appear around the border of the
# inner rectangle, possibly due to interpolation
assert np.average(
np.abs(
observed_i.astype(np.int32) - expected.astype(np.int32)
)
) < 30.0
# --------
# keypoints
# --------
def test_keypoints_without_keep_size(self):
# keypoint augmentation without keep_size
# TODO deviations of around 0.4-0.7 in this and the next test (between
# expected and observed coordinates) -- why?
kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]
kpsoi = ia.KeypointsOnImage(kps, shape=self.image.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_keypoints([kpsoi])
kps_expected = [
ia.Keypoint(x=10-0.2*30, y=10-0.2*30),
ia.Keypoint(x=14-0.2*30, y=11-0.2*30)
]
gen = zip(observed[0].keypoints, kps_expected)
# TODO deviations of around 0.5 here from expected values, why?
for kp_observed, kp_expected in gen:
assert kp_observed.coords_almost_equals(
kp_expected, max_distance=1.5)
def test_keypoints_with_keep_size(self):
# keypoint augmentation with keep_size
kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]
kpsoi = ia.KeypointsOnImage(kps, shape=self.image.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_keypoints([kpsoi])
kps_expected = [
ia.Keypoint(x=((10-0.2*30)/(30*0.6))*30,
y=((10-0.2*30)/(30*0.6))*30),
ia.Keypoint(x=((14-0.2*30)/(30*0.6))*30,
y=((11-0.2*30)/(30*0.6))*30)
]
gen = zip(observed[0].keypoints, kps_expected)
# TODO deviations of around 0.5 here from expected values, why?
for kp_observed, kp_expected in gen:
assert kp_observed.coords_almost_equals(
kp_expected, max_distance=1.5)
def test_image_keypoint_alignment(self):
img = np.zeros((100, 100), dtype=np.uint8)
img[25-3:25+3, 25-3:25+3] = 255
img[50-3:50+3, 25-3:25+3] = 255
img[75-3:75+3, 25-3:25+3] = 255
img[25-3:25+3, 75-3:75+3] = 255
img[50-3:50+3, 75-3:75+3] = 255
img[75-3:75+3, 75-3:75+3] = 255
img[50-3:75+3, 50-3:75+3] = 255
kps = [
ia.Keypoint(y=25, x=25), ia.Keypoint(y=50, x=25),
ia.Keypoint(y=75, x=25), ia.Keypoint(y=25, x=75),
ia.Keypoint(y=50, x=75), ia.Keypoint(y=75, x=75),
ia.Keypoint(y=50, x=50)
]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
aug = iaa.PerspectiveTransform(scale=(0.05, 0.15), keep_size=True)
for _ in sm.xrange(10):
aug_det = aug.to_deterministic()
imgs_aug = aug_det.augment_images([img, img])
kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])
for img_aug, kpsoi_aug in zip(imgs_aug, kpsois_aug):
assert kpsoi_aug.shape == img.shape
for kp_aug in kpsoi_aug.keypoints:
x, y = int(np.round(kp_aug.x)), int(np.round(kp_aug.y))
if 0 <= x < img.shape[1] and 0 <= y < img.shape[0]:
assert img_aug[y, x] > 10
def test_empty_keypoints(self):
# test empty keypoints
kpsoi = ia.KeypointsOnImage([], shape=(20, 10, 3))
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
observed = aug.augment_keypoints(kpsoi)
assert_cbaois_equal(observed, kpsoi)
# --------
# abstract test methods for polygons and line strings
# --------
@classmethod
def _test_cbaois_without_keep_size(cls, cba_class, cbaoi_class, augf_name):
points = np.float32([
[10, 10],
[25, 10],
[25, 25],
[10, 25]
])
cbaoi = cbaoi_class([cba_class(points)], shape=(30, 30, 3))
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = getattr(aug, augf_name)(cbaoi)
assert observed.shape == (30 - 12, 30 - 12, 3)
assert len(observed.items) == 1
if hasattr(observed.items[0], "is_valid"):
assert observed.items[0].is_valid
points_expected = np.copy(points)
points_expected[:, 0] -= 0.2 * 30
points_expected[:, 1] -= 0.2 * 30
# TODO deviations of around 0.5 here from expected values, why?
assert observed.items[0].coords_almost_equals(
points_expected, max_distance=1.5)
@classmethod
def _test_cbaois_with_keep_size(cls, cba_class, cbaoi_class, augf_name):
# polygon augmentation with keep_size
points = np.float32([
[10, 10],
[25, 10],
[25, 25],
[10, 25]
])
cbaoi = cbaoi_class([cba_class(points)], shape=(30, 30, 3))
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = getattr(aug, augf_name)(cbaoi)
assert observed.shape == (30, 30, 3)
assert len(observed.items) == 1
if hasattr(observed.items[0], "is_valid"):
assert observed.items[0].is_valid
points_expected = np.copy(points)
points_expected[:, 0] = (
(points_expected[:, 0] - 0.2 * 30) / (30 * 0.6)
) * 30
points_expected[:, 1] = (
(points_expected[:, 1] - 0.2 * 30) / (30 * 0.6)
) * 30
# TODO deviations of around 0.5 here from expected values, why?
assert observed.items[0].coords_almost_equals(
points_expected, max_distance=2.5)
@classmethod
def _test_image_cba_alignment(cls, cba_class, cbaoi_class, augf_name):
img = np.zeros((100, 100), dtype=np.uint8)
img[25-3:25+3, 25-3:25+3] = 255
img[50-3:50+3, 25-3:25+3] = 255
img[75-3:75+3, 25-3:25+3] = 255
img[25-3:25+3, 75-3:75+3] = 255
img[50-3:50+3, 75-3:75+3] = 255
img[75-3:75+3, 75-3:75+3] = 255
points = [
[25, 25],
[75, 25],
[75, 50],
[75, 75],
[25, 75],
[25, 50]
]
cbaoi = cbaoi_class([cba_class(points)], shape=img.shape)
aug = iaa.PerspectiveTransform(scale=0.1, keep_size=True)
for _ in sm.xrange(10):
aug_det = aug.to_deterministic()
imgs_aug = aug_det.augment_images([img] * 4)
cbaois_aug = getattr(aug_det, augf_name)([cbaoi] * 4)
for img_aug, cbaoi_aug in zip(imgs_aug, cbaois_aug):
assert cbaoi_aug.shape == img.shape
for cba_aug in cbaoi_aug.items:
if hasattr(cba_aug, "is_valid"):
assert cba_aug.is_valid
for x, y in cba_aug.coords:
if 0 <= x < img.shape[1] and 0 <= y < img.shape[0]:
bb = ia.BoundingBox(x1=x-2, x2=x+2, y1=y-2, y2=y+2)
img_ex = bb.extract_from_image(img_aug)
assert np.any(img_ex > 10)
@classmethod
def _test_empty_cba(cls, cbaoi, augf_name):
# test empty polygons
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi)
# --------
# polygons
# --------
def test_polygons_without_keep_size(self):
self._test_cbaois_without_keep_size(ia.Polygon, ia.PolygonsOnImage,
"augment_polygons")
def test_polygons_with_keep_size(self):
self._test_cbaois_with_keep_size(ia.Polygon, ia.PolygonsOnImage,
"augment_polygons")
def test_image_polygon_alignment(self):
self._test_image_cba_alignment(ia.Polygon, ia.PolygonsOnImage,
"augment_polygons")
def test_empty_polygons(self):
psoi = ia.PolygonsOnImage([], shape=(20, 10, 3))
self._test_empty_cba(psoi, "augment_polygons")
def test_polygons_under_extreme_scale_values(self):
# test extreme scales
# TODO when setting .min_height and .min_width in PerspectiveTransform
# to 1x1, at least one of the output polygons was invalid and had
# only 3 instead of the expected 4 points - why?
for scale in [0.1, 0.2, 0.3, 0.4]:
with self.subTest(scale=scale):
exterior = np.float32([
[10, 10],
[25, 10],
[25, 25],
[10, 25]
])
psoi = ia.PolygonsOnImage([ia.Polygon(exterior)],
shape=(30, 30, 3))
aug = iaa.PerspectiveTransform(scale=scale, keep_size=True)
aug.jitter = iap.Deterministic(scale)
observed = aug.augment_polygons(psoi)
assert observed.shape == (30, 30, 3)
assert len(observed.polygons) == 1
assert observed.polygons[0].is_valid
# FIXME this part is currently deactivated due to too large
# deviations from expectations. As the alignment check
# works, this is probably some error on the test side
"""
exterior_expected = np.copy(exterior)
exterior_expected[:, 0] = (
(exterior_expected[:, 0] - scale * 30) / (30*(1-2*scale))
) * 30
exterior_expected[:, 1] = (
(exterior_expected[:, 1] - scale * 30) / (30*(1-2*scale))
) * 30
poly0 = observed.polygons[0]
# TODO deviations of around 0.5 here from expected values, why?
assert poly0.exterior_almost_equals(
exterior_expected, max_distance=2.0)
"""
# --------
# line strings
# --------
def test_line_strings_without_keep_size(self):
self._test_cbaois_without_keep_size(ia.LineString, ia.LineStringsOnImage,
"augment_line_strings")
def test_line_strings_with_keep_size(self):
self._test_cbaois_with_keep_size(ia.LineString, ia.LineStringsOnImage,
"augment_line_strings")
def test_image_line_string_alignment(self):
self._test_image_cba_alignment(ia.LineString, ia.LineStringsOnImage,
"augment_line_strings")
def test_empty_line_strings(self):
lsoi = ia.LineStringsOnImage([], shape=(20, 10, 3))
self._test_empty_cba(lsoi, "augment_line_strings")
# --------
# bounding boxes
# --------
def test_bounding_boxes_without_keep_size(self):
# BB augmentation without keep_size
# TODO deviations of around 0.4-0.7 in this and the next test (between
# expected and observed coordinates) -- why?
bbs = [ia.BoundingBox(x1=0, y1=10, x2=20, y2=20)]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_bounding_boxes([bbsoi])
bbs_expected = [
ia.BoundingBox(x1=0-0.2*30, y1=10-0.2*30,
x2=20-0.2*30, y2=20-0.2*30)
]
gen = zip(observed[0].bounding_boxes, bbs_expected)
# TODO deviations of around 0.5 here from expected values, why?
for bb_observed, bb_expected in gen:
assert bb_observed.coords_almost_equals(
bb_expected, max_distance=1.5)
def test_bounding_boxes_with_keep_size(self):
# BB augmentation with keep_size
bbs = [ia.BoundingBox(x1=0, y1=10, x2=20, y2=20)]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_bounding_boxes([bbsoi])
bbs_expected = [
ia.BoundingBox(
x1=((0-0.2*30)/(30*0.6))*30,
y1=((10-0.2*30)/(30*0.6))*30,
x2=((20-0.2*30)/(30*0.6))*30,
y2=((20-0.2*30)/(30*0.6))*30
)
]
gen = zip(observed[0].bounding_boxes, bbs_expected)
# TODO deviations of around 0.5 here from expected values, why?
for bb_observed, bb_expected in gen:
assert bb_observed.coords_almost_equals(
bb_expected, max_distance=1.5)
def test_image_bounding_box_alignment(self):
img = np.zeros((100, 100), dtype=np.uint8)
img[35:35+1, 35:65+1] = 255
img[65:65+1, 35:65+1] = 255
img[35:65+1, 35:35+1] = 255
img[35:65+1, 65:65+1] = 255
bbs = [
ia.BoundingBox(y1=35.5, x1=35.5, y2=65.5, x2=65.5),
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=img.shape)
aug = iaa.PerspectiveTransform(scale=(0.05, 0.2), keep_size=True)
for _ in sm.xrange(10):
imgs_aug, bbsois_aug = aug(
images=[img, img, img, img],
bounding_boxes=[bbsoi, bbsoi, bbsoi, bbsoi])
nb_skipped = 0
for img_aug, bbsoi_aug in zip(imgs_aug, bbsois_aug):
assert bbsoi_aug.shape == img_aug.shape
for bb_aug in bbsoi_aug.bounding_boxes:
if bb_aug.is_fully_within_image(img_aug):
# top, bottom, left, right
x1 = bb_aug.x1_int
x2 = bb_aug.x2_int
y1 = bb_aug.y1_int
y2 = bb_aug.y2_int
top_row = img_aug[y1-1:y1+1, x1-1:x2+1]
btm_row = img_aug[y2-1:y2+1, x1-1:x2+1]
lft_row = img_aug[y1-1:y2+1, x1-1:x1+1]
rgt_row = img_aug[y1-1:y2+1, x2-1:x2+1]
assert np.max(top_row) > 10
assert np.max(btm_row) > 10
assert np.max(lft_row) > 10
assert np.max(rgt_row) > 10
else:
nb_skipped += 1
assert nb_skipped <= 2
def test_bounding_boxes_cover_extreme_points(self):
# Test that for BBs, the augmented BB x coord is really the minimum
# of the BB corner x-coords after augmentation and e.g. not just always
# the augmented top-left corner's coordinate.
h = w = 200 # height, width
s = 5 # block size
j_r = 0.1 # relative amount of jitter
j = int(h * j_r) # absolute amount of jitter
# Note that PerspectiveTransform currently places four points on the
# image and back-projects to the image size (roughly).
# That's why e.g. TopWiderThanBottom has coordinates that seem like
# the top is thinner than the bottom (after projecting back to the
# image rectangle, the top becomes wider).
class _JitterTopWiderThanBottom(object):
def draw_samples(self, size, random_state):
return np.float32([
[
[j_r, 0.0], # top-left
[j_r, 0.0], # top-right
[0.0, 0.0], # bottom-right
[0.0, 0.0], # bottom-left
]
])
class _JitterTopThinnerThanBottom(object):
def draw_samples(self, size, random_state):
return np.float32([
[
[0.0, 0.0], # top-left
[0.0, 0.0], # top-right
[j_r, 0.0], # bottom-right
[j_r, 0.0], # bottom-left
]
])
class _JitterLeftWiderThanRight(object):
def draw_samples(self, size, random_state):
return np.float32([
[
[0.0, j_r], # top-left
[0.0, 0.0], # top-right
[0.0, 0.0], # bottom-right
[0.0, j_r], # bottom-left
]
])
class _JitterLeftThinnerThanRight(object):
def draw_samples(self, size, random_state):
return np.float32([
[
[0.0, 0.0], # top-left
[0.0, j_r], # top-right
[0.0, j_r], # bottom-right
[0.0, 0.0], # bottom-left
]
])
jitters = [
_JitterTopWiderThanBottom(),
_JitterTopThinnerThanBottom(),
_JitterLeftWiderThanRight(),
_JitterLeftThinnerThanRight(),
]
# expected coordinates after applying the above jitter
# coordinates here are given as
# (ystart, yend), (xstart, xend)
coords = [
# top wider than bottom
[
[(0+j, s+j+1), (0, s+1)], # top left
[(0+j, s+j+1), (w-s, w+1)], # top right
[(h-s-j, h-j+1), (w-s-j, w-j+1)], # bottom right
[(h-s-j, h-j+1), (0+j, s+j+1)] # bottom left
],
# top thinner than bottom
[
[(0+j, s+j+1), (0+j, s+j+1)],
[(0+j, s+j+1), (w-s-j, w-j+1)],
[(h-s-j, h-j+1), (w-s, w+1)],
[(h-s-j, h-j+1), (0, s+1)]
],
# left wider than right
[
[(0, s+1), (0+j, s+j+1)],
[(0+j, s+j+1), (w-s-j, w-j+1)],
[(h-s-j, h-j+1), (w-s-j, w-j+1)],
[(h-s, h+1), (0+j, s+j+1)]
],
# left thinner than right
[
[(0+j, s+j+1), (0+j, s+j+1)],
[(0, s+1), (w-s-j, w-j+1)],
[(h-s, h+1), (w-s-j, w-j+1)],
[(h-s-j, h-j+1), (0+j, s+j+1)]
],
]
image = np.zeros((h-1, w-1, 4), dtype=np.uint8)
image = ia.pad(image, top=1, right=1, bottom=1, left=1, cval=50)
image[0+j:s+j+1, 0+j:s+j+1, 0] = 255
image[0+j:s+j+1, w-s-j:w-j+1, 1] = 255
image[h-s-j:h-j+1, w-s-j:w-j+1, 2] = 255
image[h-s-j:h-j+1, 0+j:s+j+1, 3] = 255
bb = ia.BoundingBox(x1=0.0+j,
y1=0.0+j,
x2=w-j,
y2=h-j)
bbsoi = ia.BoundingBoxesOnImage([bb], shape=image.shape)
i = 0
for jitter, coords_i in zip(jitters, coords):
with self.subTest(jitter=jitter.__class__.__name__):
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = jitter
image_aug, bbsoi_aug = aug(image=image, bounding_boxes=bbsoi)
assert image_aug.shape == image.shape
import imageio
imageio.imwrite("tmp"+str(i)+".jpg", image_aug[:, :, 0:3])
i += 1
(tl_y1, tl_y2), (tl_x1, tl_x2) = coords_i[0]
(tr_y1, tr_y2), (tr_x1, tr_x2) = coords_i[1]
(br_y1, br_y2), (br_x1, br_x2) = coords_i[2]
(bl_y1, bl_y2), (bl_x1, bl_x2) = coords_i[3]
# We have to be rather tolerant here (>100 instead of e.g.
# >200), because the transformation seems to be not that
# accurate and the blobs may be a few pixels off the expected
# coorindates.
assert np.max(image_aug[tl_y1:tl_y2, tl_x1:tl_x2, 0]) > 100
assert np.max(image_aug[tr_y1:tr_y2, tr_x1:tr_x2, 1]) > 100
assert np.max(image_aug[br_y1:br_y2, br_x1:br_x2, 2]) > 100
assert np.max(image_aug[bl_y1:bl_y2, bl_x1:bl_x2, 3]) > 100
# We have rather strong tolerances of 7.5 here, partially
# because the blobs are wide and the true coordinates are in
# the center of the blobs; partially, because of above
# mentioned inaccuracy of PerspectiveTransform.
bb_aug = bbsoi_aug.bounding_boxes[0]
exp_x1 = min([tl_x1, tr_x1, br_x1, bl_x1])
exp_x2 = max([tl_x2, tr_x2, br_x2, bl_x2])
exp_y1 = min([tl_y1, tr_y1, br_y1, bl_y1])
exp_y2 = max([tl_y2, tr_y2, br_y2, bl_y2])
assert np.isclose(bb_aug.x1, exp_x1, atol=7.5)
assert np.isclose(bb_aug.y1, exp_y1, atol=7.5)
assert np.isclose(bb_aug.x2, exp_x2, atol=7.5)
assert np.isclose(bb_aug.y2, exp_y2, atol=7.5)
def test_empty_bounding_boxes(self):
# test empty bounding boxes
bbsoi = ia.BoundingBoxesOnImage([], shape=(20, 10, 3))
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
# ------------
# mode
# ------------
def test_draw_samples_with_mode_being_int(self):
aug = iaa.PerspectiveTransform(scale=0.001, mode=cv2.BORDER_REPLICATE)
samples = aug._draw_samples([(10, 10, 3)], iarandom.RNG(0))
assert samples.modes.shape == (1,)
assert samples.modes[0] == cv2.BORDER_REPLICATE
def test_draw_samples_with_mode_being_string(self):
aug = iaa.PerspectiveTransform(scale=0.001, mode="replicate")
samples = aug._draw_samples([(10, 10, 3)], iarandom.RNG(0))
assert samples.modes.shape == (1,)
assert samples.modes[0] == cv2.BORDER_REPLICATE
def test_mode_replicate_copies_values(self):
aug = iaa.PerspectiveTransform(
scale=0.001, mode="replicate", cval=0, random_state=31)
img = np.ones((256, 256, 3), dtype=np.uint8) * 255
img_aug = aug.augment_image(img)
assert (img_aug == 255).all()
def test_mode_constant_uses_cval(self):
aug255 = iaa.PerspectiveTransform(
scale=0.001, mode="constant", cval=255, random_state=31)
aug0 = iaa.PerspectiveTransform(
scale=0.001, mode="constant", cval=0, random_state=31)
img = np.ones((256, 256, 3), dtype=np.uint8) * 255
img_aug255 = aug255.augment_image(img)
img_aug0 = aug0.augment_image(img)
assert (img_aug255 == 255).all()
assert not (img_aug0 == 255).all()
# ---------
# fit_output
# ---------
def test_fit_output_with_fixed_jitter(self):
aug = iaa.PerspectiveTransform(scale=0.2, fit_output=True,
keep_size=False)
aug.jitter = iap.Deterministic(0.2)
image = np.zeros((40, 40, 3), dtype=np.uint8)
image[0:3, 0:3, 0] = 255
image[0:3, 40-3:, 1] = 255
image[40-3:, 40-3:, 2] = 255
image_aug = aug(image=image)
h, w = image_aug.shape[0:2]
y0 = np.argmax(image_aug[:, 0, 0])
x0 = np.argmax(image_aug[0, :, 0])
y1 = np.argmax(image_aug[:, w-1, 1])
x1 = np.argmax(image_aug[0, :, 1])
y2 = np.argmax(image_aug[:, w-1, 2])
x2 = np.argmax(image_aug[h-1, :, 2])
# different shape
assert image_aug.shape != image.shape
# corners roughly still at top-left, top-right, bottom-right
assert 0 <= y0 <= 3
assert 0 <= x0 <= 3
assert 0 <= y1 <= 3
assert image_aug.shape[1]-3 <= x1 <= image_aug.shape[1]
assert image_aug.shape[1]-3 <= y2 <= image_aug.shape[1]
assert image_aug.shape[1]-3 <= x2 <= image_aug.shape[1]
# no corner pixels now in the center
assert np.max(image_aug[8:h-8, 8:w-8, :]) == 0
def test_fit_output_with_random_jitter(self):
aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,
keep_size=False)
image = np.zeros((50, 50, 4), dtype=np.uint8)
image[0:5, 0:5, 0] = 255
image[0:5, 50-5:, 1] = 255
image[50-5:, 50-5:, 2] = 255
image[50-5:, 0:5, 3] = 255
for _ in sm.xrange(10):
image_aug = aug(image=image)
h, w = image_aug.shape[0:2]
arr_nochan = np.max(image_aug, axis=2)
y_idx = np.where(np.max(arr_nochan, axis=1))[0]
x_idx = np.where(np.max(arr_nochan, axis=0))[0]
y_min = np.min(y_idx)
y_max = np.max(y_idx)
x_min = np.min(x_idx)
x_max = np.max(x_idx)
tol = 0
assert 0 <= y_min <= 5+tol
assert 0 <= x_min <= 5+tol
assert h-5-tol <= y_max <= h-1
assert w-5-tol <= x_max <= w-1
def test_fit_output_with_random_jitter__segmentation_maps(self):
aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,
keep_size=False)
arr = np.zeros((50, 50, 4), dtype=np.uint8)
arr[0:5, 0:5, 0] = 1
arr[0:5, 50-5:, 1] = 1
arr[50-5:, 50-5:, 2] = 1
arr[50-5:, 0:5, 3] = 1
segmap = ia.SegmentationMapsOnImage(arr, shape=(50, 50, 3))
image = np.zeros((49, 49, 3), dtype=np.uint8)
image = ia.pad(image, top=1, right=1, bottom=1, left=1, cval=128)
for _ in sm.xrange(10):
segmap_aug, image_aug = aug(segmentation_maps=segmap, image=image)
h, w = segmap_aug.arr.shape[0:2]
arr_nochan = np.max(segmap_aug.arr, axis=2)
y_idx = np.where(np.max(arr_nochan, axis=1))[0]
x_idx = np.where(np.max(arr_nochan, axis=0))[0]
y_min = np.min(y_idx)
y_max = np.max(y_idx)
x_min = np.min(x_idx)
x_max = np.max(x_idx)
tol = 0
assert 0 <= y_min <= 5+tol
assert 0 <= x_min <= 5+tol
assert h-5-tol <= y_max <= h-1
assert w-5-tol <= x_max <= w-1
def test_fit_output_with_fixed_jitter__keypoints(self):
aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,
keep_size=False)
kpsoi = ia.KeypointsOnImage.from_xy_array([
(0, 0),
(50, 0),
(50, 50),
(0, 50)
], shape=(50, 50, 3))
for _ in sm.xrange(10):
kpsoi_aug = aug(keypoints=kpsoi)
h, w = kpsoi_aug.shape[0:2]
y0, x0 = kpsoi_aug.keypoints[0].y, kpsoi_aug.keypoints[0].x
y1, x1 = kpsoi_aug.keypoints[1].y, kpsoi_aug.keypoints[1].x
y2, x2 = kpsoi_aug.keypoints[2].y, kpsoi_aug.keypoints[2].x
y3, x3 = kpsoi_aug.keypoints[3].y, kpsoi_aug.keypoints[3].x
y_min = min([y0, y1, y2, y3])
y_max = max([y0, y1, y2, y3])
x_min = min([x0, x1, x2, x3])
x_max = max([x0, x1, x2, x3])
tol = 0.5
assert 0-tol <= y_min <= tol
assert 0-tol <= x_min <= tol
assert h-tol <= y_max <= h+tol
assert w-tol <= x_max <= w+tol
# ---------
# unusual channel numbers
# ---------
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PerspectiveTransform(scale=0.01)
image_aug = aug(image=image)
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# ---------
# zero-sized axes
# ---------
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
for keep_size in [False, True]:
with self.subTest(shape=shape, keep_size=keep_size):
for _ in sm.xrange(3):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PerspectiveTransform(scale=0.01)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# --------
# get_parameters
# --------
def test_get_parameters(self):
aug = iaa.PerspectiveTransform(scale=0.1, keep_size=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Normal)
assert isinstance(params[0].scale, iap.Deterministic)
assert 0.1 - 1e-8 < params[0].scale.value < 0.1 + 1e-8
assert params[1] is False
assert params[2].value == 0
assert params[3].value == "constant"
assert params[4] is False
# --------
# other dtypes
# --------
def test_other_dtypes_bool(self):
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
y1 = int(30 * 0.2)
y2 = int(30 * 0.8)
x1 = int(30 * 0.2)
x2 = int(30 * 0.8)
image = np.zeros((30, 30), dtype=bool)
image[12:18, :] = True
image[:, 12:18] = True
expected = image[y1:y2, x1:x2]
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert image_aug.shape == expected.shape
assert (np.sum(image_aug == expected) / expected.size) > 0.9
def test_other_dtypes_uint_int(self):
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
y1 = int(30 * 0.2)
y2 = int(30 * 0.8)
x1 = int(30 * 0.2)
x2 = int(30 * 0.8)
dtypes = ["uint8", "uint16", "int8", "int16"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [0, 1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value-100, max_value]
values = values + [(-1)*value for value in values]
else:
values = [0, 1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value-100, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((30, 30), dtype=dtype)
image[12:18, :] = value
image[:, 12:18] = value
expected = image[y1:y2, x1:x2]
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == expected.shape
# rather high tolerance of 0.7 here because of
# interpolation
assert (
np.sum(image_aug == expected) / expected.size
) > 0.7
def test_other_dtypes_float(self):
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
y1 = int(30 * 0.2)
y2 = int(30 * 0.8)
x1 = int(30 * 0.2)
x2 = int(30 * 0.8)
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((30, 30), dtype=dtype)
image[12:18, :] = value
image[:, 12:18] = value
expected = image[y1:y2, x1:x2]
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == expected.shape
# rather high tolerance of 0.7 here because of
# interpolation
assert (
np.sum(_isclose(image_aug, expected)) / expected.size
) > 0.7
class _elastic_trans_temp_thresholds(object):
def __init__(self, alpha, sigma):
self.alpha = alpha
self.sigma = sigma
self.old_alpha = None
self.old_sigma = None
def __enter__(self):
self.old_alpha = iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH
self.old_sigma = iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH
iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = self.alpha
iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = self.sigma
def __exit__(self, exc_type, exc_val, exc_tb):
iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = self.old_alpha
iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = self.old_sigma
# TODO add tests for order
# TODO improve tests for cval
# TODO add tests for mode
class TestElasticTransformation(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
img = np.zeros((50, 50), dtype=np.uint8) + 255
img = np.pad(img, ((100, 100), (100, 100)), mode="constant",
constant_values=0)
return img
@property
def mask(self):
img = self.image
mask = img > 0
return mask
@property
def heatmaps(self):
img = self.image
return HeatmapsOnImage(img.astype(np.float32) / 255.0,
shape=img.shape)
@property
def segmaps(self):
img = self.image
return SegmentationMapsOnImage((img > 0).astype(np.int32),
shape=img.shape)
# -----------
# __init__
# -----------
def test___init___bad_datatype_for_alpha_leads_to_failure(self):
# test alpha having bad datatype
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=False, sigma=0.25)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___alpha_is_tuple(self):
# test alpha being tuple
aug = iaa.ElasticTransformation(alpha=(1.0, 2.0), sigma=0.25)
assert isinstance(aug.alpha, iap.Uniform)
assert isinstance(aug.alpha.a, iap.Deterministic)
assert isinstance(aug.alpha.b, iap.Deterministic)
assert 1.0 - 1e-8 < aug.alpha.a.value < 1.0 + 1e-8
assert 2.0 - 1e-8 < aug.alpha.b.value < 2.0 + 1e-8
def test___init___sigma_is_tuple(self):
# test sigma being tuple
aug = iaa.ElasticTransformation(alpha=0.25, sigma=(1.0, 2.0))
assert isinstance(aug.sigma, iap.Uniform)
assert isinstance(aug.sigma.a, iap.Deterministic)
assert isinstance(aug.sigma.b, iap.Deterministic)
assert 1.0 - 1e-8 < aug.sigma.a.value < 1.0 + 1e-8
assert 2.0 - 1e-8 < aug.sigma.b.value < 2.0 + 1e-8
def test___init___bad_datatype_for_sigma_leads_to_failure(self):
# test sigma having bad datatype
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___order_is_all(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=ia.ALL)
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2, 3, 4, 5]])
def test___init___order_is_int(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=1)
assert isinstance(aug.order, iap.Deterministic)
assert aug.order.value == 1
def test___init___order_is_list(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=[0, 1, 2])
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2]])
def test___init___order_is_stochastic_parameter(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,
order=iap.Choice([0, 1, 2, 3]))
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2, 3]])
def test___init___bad_datatype_for_order_leads_to_failure(self):
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___cval_is_all(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=ia.ALL)
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
def test___init___cval_is_int(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=128)
assert isinstance(aug.cval, iap.Deterministic)
assert aug.cval.value == 128
def test___init___cval_is_list(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,
cval=[16, 32, 64])
assert isinstance(aug.cval, iap.Choice)
assert all([cval in aug.cval.a for cval in [16, 32, 64]])
def test___init___cval_is_stochastic_parameter(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,
cval=iap.Choice([16, 32, 64]))
assert isinstance(aug.cval, iap.Choice)
assert all([cval in aug.cval.a for cval in [16, 32, 64]])
def test___init___cval_is_tuple(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=(128, 255))
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 128
assert aug.cval.b.value == 255
def test___init___bad_datatype_for_cval_leads_to_failure(self):
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___mode_is_all(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
assert all([
mode in aug.mode.a
for mode
in ["constant", "nearest", "reflect", "wrap"]])
def test___init___mode_is_string(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode="nearest")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "nearest"
def test___init___mode_is_list(self):
aug = iaa.ElasticTransformation(
alpha=0.25, sigma=1.0, mode=["constant", "nearest"])
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest"]])
def test___init___mode_is_stochastic_parameter(self):
aug = iaa.ElasticTransformation(
alpha=0.25, sigma=1.0, mode=iap.Choice(["constant", "nearest"]))
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest"]])
def test___init___bad_datatype_for_mode_leads_to_failure(self):
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----------
# alpha, sigma
# -----------
def test_images(self):
# test basic funtionality
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
observed = aug.augment_image(self.image)
mask = self.mask
# assume that some white/255 pixels have been moved away from the
# center and replaced by black/0 pixels
assert np.sum(observed[mask]) < np.sum(self.image[mask])
# assume that some black/0 pixels have been moved away from the outer
# area and replaced by white/255 pixels
assert np.sum(observed[~mask]) > np.sum(self.image[~mask])
def test_images_nonsquare(self):
# test basic funtionality with non-square images
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
img_nonsquare = np.zeros((50, 100), dtype=np.uint8) + 255
img_nonsquare = np.pad(img_nonsquare, ((100, 100), (100, 100)),
mode="constant", constant_values=0)
mask_nonsquare = (img_nonsquare > 0)
observed = aug.augment_image(img_nonsquare)
assert (
np.sum(observed[mask_nonsquare])
< np.sum(img_nonsquare[mask_nonsquare]))
assert (
np.sum(observed[~mask_nonsquare])
> np.sum(img_nonsquare[~mask_nonsquare]))
def test_images_unusual_channel_numbers(self):
# test unusual channels numbers
aug = iaa.ElasticTransformation(alpha=5, sigma=0.5)
for nb_channels in [1, 2, 4, 5, 7, 10, 11]:
img_c = np.tile(self.image[..., np.newaxis], (1, 1, nb_channels))
assert img_c.shape == (250, 250, nb_channels)
observed = aug.augment_image(img_c)
assert observed.shape == (250, 250, nb_channels)
for c in sm.xrange(1, nb_channels):
assert np.array_equal(observed[..., c], observed[..., 0])
def test_heatmaps(self):
# test basic funtionality, heatmaps
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
observed = aug.augment_heatmaps([self.heatmaps])[0]
mask = self.mask
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
assert (
np.sum(observed.get_arr()[mask])
< np.sum(self.heatmaps.get_arr()[mask]))
assert (
np.sum(observed.get_arr()[~mask])
> np.sum(self.heatmaps.get_arr()[~mask]))
def test_segmaps(self):
# test basic funtionality, segmaps
# alpha=1.5 instead of 0.5 as above here, because otherwise nothing
# is moved
aug = iaa.ElasticTransformation(alpha=1.5, sigma=0.25)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
mask = self.mask
assert observed.shape == self.segmaps.shape
assert (
np.sum(observed.get_arr()[mask])
< np.sum(self.segmaps.get_arr()[mask]))
assert (
np.sum(observed.get_arr()[~mask])
> np.sum(self.segmaps.get_arr()[~mask]))
def test_images_weak_vs_strong_alpha(self):
# test effects of increased alpha strength
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_image(self.image)
observed2 = aug2.augment_image(self.image)
mask = self.mask
# assume that the inner area has become more black-ish when using high
# alphas (more white pixels were moved out of the inner area)
assert np.sum(observed1[mask]) > np.sum(observed2[mask])
# assume that the outer area has become more white-ish when using high
# alphas (more black pixels were moved into the inner area)
assert np.sum(observed1[~mask]) < np.sum(observed2[~mask])
def test_heatmaps_weak_vs_strong_alpha(self):
# test effects of increased alpha strength, heatmaps
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_heatmaps([self.heatmaps])[0]
observed2 = aug2.augment_heatmaps([self.heatmaps])[0]
mask = self.mask
assert observed1.shape == self.heatmaps.shape
assert observed2.shape == self.heatmaps.shape
_assert_same_min_max(observed1, self.heatmaps)
_assert_same_min_max(observed2, self.heatmaps)
assert (
np.sum(observed1.get_arr()[mask])
> np.sum(observed2.get_arr()[mask]))
assert (
np.sum(observed1.get_arr()[~mask])
< np.sum(observed2.get_arr()[~mask]))
def test_segmaps_weak_vs_strong_alpha(self):
# test effects of increased alpha strength, segmaps
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_segmentation_maps([self.segmaps])[0]
observed2 = aug2.augment_segmentation_maps([self.segmaps])[0]
mask = self.mask
assert observed1.shape == self.segmaps.shape
assert observed2.shape == self.segmaps.shape
assert (
np.sum(observed1.get_arr()[mask])
> np.sum(observed2.get_arr()[mask]))
assert (
np.sum(observed1.get_arr()[~mask])
< np.sum(observed2.get_arr()[~mask]))
def test_images_low_vs_high_sigma(self):
# test effects of increased sigmas
aug1 = iaa.ElasticTransformation(alpha=3.0, sigma=0.1)
aug2 = iaa.ElasticTransformation(alpha=3.0, sigma=3.0)
observed1 = aug1.augment_image(self.image)
observed2 = aug2.augment_image(self.image)
observed1_std_hori = np.std(
observed1.astype(np.float32)[:, 1:]
- observed1.astype(np.float32)[:, :-1])
observed2_std_hori = np.std(
observed2.astype(np.float32)[:, 1:]
- observed2.astype(np.float32)[:, :-1])
observed1_std_vert = np.std(
observed1.astype(np.float32)[1:, :]
- observed1.astype(np.float32)[:-1, :])
observed2_std_vert = np.std(
observed2.astype(np.float32)[1:, :]
- observed2.astype(np.float32)[:-1, :])
observed1_std = (observed1_std_hori + observed1_std_vert) / 2
observed2_std = (observed2_std_hori + observed2_std_vert) / 2
assert observed1_std > observed2_std
def test_images_alpha_is_stochastic_parameter(self):
# test alpha being iap.Choice
aug = iaa.ElasticTransformation(alpha=iap.Choice([0.001, 5.0]),
sigma=0.25)
seen = [0, 0]
for _ in sm.xrange(100):
observed = aug.augment_image(self.image)
diff = np.average(
np.abs(
self.image.astype(np.float32)
- observed.astype(np.float32)
)
)
if diff < 1.0:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 10
assert seen[1] > 10
def test_sigma_is_stochastic_parameter(self):
# test sigma being iap.Choice
aug = iaa.ElasticTransformation(alpha=3.0,
sigma=iap.Choice([0.01, 5.0]))
seen = [0, 0]
for _ in sm.xrange(100):
observed = aug.augment_image(self.image)
observed_std_hori = np.std(
observed.astype(np.float32)[:, 1:]
- observed.astype(np.float32)[:, :-1])
observed_std_vert = np.std(
observed.astype(np.float32)[1:, :]
- observed.astype(np.float32)[:-1, :])
observed_std = (observed_std_hori + observed_std_vert) / 2
if observed_std > 10.0:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 10
assert seen[1] > 10
# -----------
# cval
# -----------
def test_images_cval_is_int_and_order_is_0(self):
aug = iaa.ElasticTransformation(alpha=30.0, sigma=3.0, mode="constant",
cval=255, order=0)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(observed == 255) > 0
assert np.sum(np.logical_and(0 < observed, observed < 255)) == 0
def test_images_cval_is_int_and_order_is_0_weak_alpha(self):
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant",
cval=0, order=0)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(observed == 255) == 0
def test_images_cval_is_int_and_order_is_2(self):
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant",
cval=255, order=2)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(np.logical_and(0 < observed, observed < 255)) > 0
def test_heatmaps_ignore_cval(self):
# cval with heatmaps
heatmaps = HeatmapsOnImage(
np.zeros((32, 32, 1), dtype=np.float32), shape=(32, 32, 3))
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0,
mode="constant", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
_assert_same_min_max(observed, heatmaps)
assert np.sum(observed.get_arr() > 0.01) == 0
def test_segmaps_ignore_cval(self):
# cval with segmaps
segmaps = SegmentationMapsOnImage(
np.zeros((32, 32, 1), dtype=np.int32), shape=(32, 32, 3))
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant",
cval=255)
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == segmaps.shape
assert np.sum(observed.get_arr() > 0) == 0
# -----------
# keypoints
# -----------
def test_keypoints_no_movement_if_alpha_below_threshold(self):
# for small alpha, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=1.0, sigma=0.0):
kps = [
ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),
ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),
ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),
ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),
ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = aug.augment_keypoints([kpsoi])[0]
d = kpsoi.to_xy_array() - observed.to_xy_array()
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 1e-8
def test_keypoints_no_movement_if_sigma_below_threshold(self):
# for small sigma, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):
kps = [
ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),
ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),
ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),
ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),
ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)
observed = aug.augment_keypoints([kpsoi])[0]
d = kpsoi.to_xy_array() - observed.to_xy_array()
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 1e-8
def test_keypoints_small_movement_for_weak_alpha_if_threshold_zero(self):
# for small alpha (at sigma 1.0), should barely move
# if thresholds set to zero
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):
kps = [
ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),
ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),
ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),
ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),
ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = aug.augment_keypoints([kpsoi])[0]
d = kpsoi.to_xy_array() - observed.to_xy_array()
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 0.5
def test_image_keypoint_alignment(self):
# test alignment between between images and keypoints
image = np.zeros((120, 70), dtype=np.uint8)
s = 3
image[:, 35-s:35+s+1] = 255
kps = [ia.Keypoint(x=35, y=20),
ia.Keypoint(x=35, y=40),
ia.Keypoint(x=35, y=60),
ia.Keypoint(x=35, y=80),
ia.Keypoint(x=35, y=100)]
kpsoi = ia.KeypointsOnImage(kps, shape=image.shape)
aug = iaa.ElasticTransformation(alpha=70, sigma=5)
aug_det = aug.to_deterministic()
images_aug = aug_det.augment_images([image, image])
kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])
count_bad = 0
for image_aug, kpsoi_aug in zip(images_aug, kpsois_aug):
assert kpsoi_aug.shape == (120, 70)
assert len(kpsoi_aug.keypoints) == 5
for kp_aug in kpsoi_aug.keypoints:
x, y = int(np.round(kp_aug.x)), int(np.round(kp_aug.y))
bb = ia.BoundingBox(x1=x-2, x2=x+2+1, y1=y-2, y2=y+2+1)
img_ex = bb.extract_from_image(image_aug)
if np.any(img_ex > 10):
pass # close to expected location
else:
count_bad += 1
assert count_bad <= 1
def test_empty_keypoints(self):
aug = iaa.ElasticTransformation(alpha=10, sigma=10)
kpsoi = ia.KeypointsOnImage([], shape=(10, 10, 3))
kpsoi_aug = aug.augment_keypoints(kpsoi)
assert len(kpsoi_aug.keypoints) == 0
assert kpsoi_aug.shape == (10, 10, 3)
# -----------
# abstract methods for polygons and line strings
# -----------
@classmethod
def _test_cbaois_no_movement_if_alpha_below_threshold(
cls, cba_class, cbaoi_class, augf_name):
# for small alpha, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=1.0, sigma=0.0):
cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])
cbaoi = cbaoi_class([cba], shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = getattr(aug, augf_name)(cbaoi)
assert observed.shape == (50, 50)
assert len(observed.items) == 1
assert observed.items[0].coords_almost_equals(cba)
if hasattr(observed.items[0], "is_valid"):
assert observed.items[0].is_valid
@classmethod
def _test_cbaois_no_movement_if_sigma_below_threshold(
cls, cba_class, cbaoi_class, augf_name):
# for small sigma, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):
cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])
cbaoi = cbaoi_class([cba], shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)
observed = getattr(aug, augf_name)(cbaoi)
assert observed.shape == (50, 50)
assert len(observed.items) == 1
assert observed.items[0].coords_almost_equals(cba)
if hasattr(observed.items[0], "is_valid"):
assert observed.items[0].is_valid
@classmethod
def _test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(
cls, cba_class, cbaoi_class, augf_name):
# for small alpha (at sigma 1.0), should barely move
# if thresholds set to zero
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):
cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])
cbaoi = cbaoi_class([cba], shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = getattr(aug, augf_name)(cbaoi)
assert observed.shape == (50, 50)
assert len(observed.items) == 1
assert observed.items[0].coords_almost_equals(
cba, max_distance=0.5)
if hasattr(observed.items[0], "is_valid"):
assert observed.items[0].is_valid
@classmethod
def _test_image_cbaoi_alignment(cls, cba_class, cbaoi_class, augf_name):
# test alignment between between images and polygons
height_step_size = 50
width_step_size = 30
height_steps = 2 # don't set >2, otherwise polygon will be broken
width_steps = 10
height = (2+height_steps) * height_step_size
width = (2+width_steps) * width_step_size
s = 3
image = np.zeros((height, width), dtype=np.uint8)
points = []
for w in sm.xrange(0, 2+width_steps):
if w not in [0, width_steps+2-1]:
x = width_step_size * w
y = height_step_size
points.append((x, y))
image[y-s:y+s+1, x-s:x+s+1] = 255
for w in sm.xrange(2+width_steps-1, 0, -1):
if w not in [0, width_steps+2-1]:
x = width_step_size * w
y = height_step_size*2
points.append((x, y))
image[y-s:y+s+1, x-s:x+s+1] = 255
cba = cba_class(points)
cbaoi = cbaoi_class([cba], shape=image.shape)
aug = iaa.ElasticTransformation(alpha=100, sigma=7)
aug_det = aug.to_deterministic()
images_aug = aug_det.augment_images([image, image])
cbaois_aug = getattr(aug_det, augf_name)([cbaoi, cbaoi])
count_bad = 0
for image_aug, cbaoi_aug in zip(images_aug, cbaois_aug):
assert cbaoi_aug.shape == image.shape
assert len(cbaoi_aug.items) == 1
for cba_aug in cbaoi_aug.items:
if hasattr(cba_aug, "is_valid"):
assert cba_aug.is_valid
for point_aug in cba_aug.coords:
x, y = point_aug[0], point_aug[1]
bb = ia.BoundingBox(x1=x-2, x2=x+2, y1=y-2, y2=y+2)
img_ex = bb.extract_from_image(image_aug)
if np.any(img_ex > 10):
pass # close to expected location
else:
count_bad += 1
assert count_bad <= 3
@classmethod
def _test_empty_cbaois(cls, cbaoi, augf_name):
aug = iaa.ElasticTransformation(alpha=10, sigma=10)
cbaoi_aug = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(cbaoi_aug, cbaoi)
# -----------
# polygons
# -----------
def test_polygons_no_movement_if_alpha_below_threshold(self):
self._test_cbaois_no_movement_if_alpha_below_threshold(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_polygons_no_movement_if_sigma_below_threshold(self):
self._test_cbaois_no_movement_if_sigma_below_threshold(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_polygons_small_movement_for_weak_alpha_if_threshold_zero(self):
self._test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_image_polygon_alignment(self):
self._test_image_cbaoi_alignment(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_empty_polygons(self):
cbaoi = ia.PolygonsOnImage([], shape=(10, 10, 3))
self._test_empty_cbaois(cbaoi, "augment_polygons")
# -----------
# line strings
# -----------
def test_line_strings_no_movement_if_alpha_below_threshold(self):
self._test_cbaois_no_movement_if_alpha_below_threshold(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_line_strings_no_movement_if_sigma_below_threshold(self):
self._test_cbaois_no_movement_if_sigma_below_threshold(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_line_strings_small_movement_for_weak_alpha_if_threshold_zero(self):
self._test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_image_line_string_alignment(self):
self._test_image_cbaoi_alignment(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_empty_line_strings(self):
cbaoi = ia.LineStringsOnImage([], shape=(10, 10, 3))
self._test_empty_cbaois(cbaoi, "augment_line_strings")
# -----------
# bounding boxes
# -----------
def test_bounding_boxes_no_movement_if_alpha_below_threshold(self):
# for small alpha, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=1.0, sigma=0.0):
bbs = [
ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),
ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = aug.augment_bounding_boxes([bbsoi])[0]
d = bbsoi.to_xyxy_array() - observed.to_xyxy_array()
d = d.reshape((2*2, 2))
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 1e-8
def test_bounding_boxes_no_movement_if_sigma_below_threshold(self):
# for small sigma, should not move if below threshold
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):
bbs = [
ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),
ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)
observed = aug.augment_bounding_boxes([bbsoi])[0]
d = bbsoi.to_xyxy_array() - observed.to_xyxy_array()
d = d.reshape((2*2, 2))
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 1e-8
def test_bounding_boxes_small_movement_for_weak_alpha_if_threshold_zero(
self):
# for small alpha (at sigma 1.0), should barely move
# if thresholds set to zero
with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):
bbs = [
ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),
ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = aug.augment_bounding_boxes([bbsoi])[0]
d = bbsoi.to_xyxy_array() - observed.to_xyxy_array()
d = d.reshape((2*2, 2))
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 0.5
def test_image_bounding_box_alignment(self):
# test alignment between between images and bounding boxes
image = np.zeros((100, 100), dtype=np.uint8)
image[35:35+1, 35:65+1] = 255
image[65:65+1, 35:65+1] = 255
image[35:65+1, 35:35+1] = 255
image[35:65+1, 65:65+1] = 255
bbs = [
ia.BoundingBox(x1=35.5, y1=35.5, x2=65.5, y2=65.5)
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=image.shape)
aug = iaa.ElasticTransformation(alpha=70, sigma=5)
images_aug, bbsois_aug = aug(images=[image, image],
bounding_boxes=[bbsoi, bbsoi])
count_bad = 0
for image_aug, bbsoi_aug in zip(images_aug, bbsois_aug):
assert bbsoi_aug.shape == (100, 100)
assert len(bbsoi_aug.bounding_boxes) == 1
for bb_aug in bbsoi_aug.bounding_boxes:
if bb_aug.is_fully_within_image(image_aug):
# top, bottom, left, right
x1 = bb_aug.x1_int
x2 = bb_aug.x2_int
y1 = bb_aug.y1_int
y2 = bb_aug.y2_int
top_row = image_aug[y1-2:y1+2, x1-2:x2+2]
btm_row = image_aug[y2-2:y2+2, x1-2:x2+2]
lft_row = image_aug[y1-2:y2+2, x1-2:x1+2]
rgt_row = image_aug[y1-2:y2+2, x2-2:x2+2]
assert np.max(top_row) > 10
assert np.max(btm_row) > 10
assert np.max(lft_row) > 10
assert np.max(rgt_row) > 10
else:
count_bad += 1
assert count_bad <= 1
def test_empty_bounding_boxes(self):
aug = iaa.ElasticTransformation(alpha=10, sigma=10)
bbsoi = ia.BoundingBoxesOnImage([], shape=(10, 10, 3))
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
assert len(bbsoi_aug.bounding_boxes) == 0
assert bbsoi_aug.shape == (10, 10, 3)
# -----------
# heatmaps alignment
# -----------
def test_image_heatmaps_alignment(self):
# test alignment between images and heatmaps
img = np.zeros((80, 80), dtype=np.uint8)
img[:, 30:50] = 255
img[30:50, :] = 255
hm = HeatmapsOnImage(img.astype(np.float32)/255.0, shape=(80, 80))
aug = iaa.ElasticTransformation(alpha=60.0, sigma=4.0, mode="constant",
cval=0)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
hm_aug = aug_det.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (80, 80)
assert hm_aug.arr_0to1.shape == (80, 80, 1)
assert (same / img_aug_mask.size) >= 0.99
def test_image_heatmaps_alignment_if_heatmaps_smaller_than_image(self):
# test alignment between images and heatmaps
# here with heatmaps that are smaller than the image
img = np.zeros((80, 80), dtype=np.uint8)
img[:, 30:50] = 255
img[30:50, :] = 255
img_small = ia.imresize_single_image(
img, (40, 40), interpolation="nearest")
hm = HeatmapsOnImage(
img_small.astype(np.float32)/255.0,
shape=(80, 80))
aug = iaa.ElasticTransformation(
alpha=60.0, sigma=4.0, mode="constant", cval=0)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
hm_aug = aug_det.augment_heatmaps([hm])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(
hm_aug.arr_0to1, (80, 80), interpolation="nearest"
) > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (80, 80)
assert hm_aug.arr_0to1.shape == (40, 40, 1)
assert (same / img_aug_mask.size) >= 0.94
# -----------
# segmaps alignment
# -----------
def test_image_segmaps_alignment(self):
# test alignment between images and segmaps
img = np.zeros((80, 80), dtype=np.uint8)
img[:, 30:50] = 255
img[30:50, :] = 255
segmaps = SegmentationMapsOnImage(
(img > 0).astype(np.int32),
shape=(80, 80))
aug = iaa.ElasticTransformation(
alpha=60.0, sigma=4.0, mode="constant", cval=0, order=0)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
segmaps_aug = aug_det.augment_segmentation_maps([segmaps])[0]
img_aug_mask = img_aug > 255*0.1
segmaps_aug_mask = segmaps_aug.arr > 0
same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
assert segmaps_aug.shape == (80, 80)
assert segmaps_aug.arr.shape == (80, 80, 1)
assert (same / img_aug_mask.size) >= 0.99
def test_image_segmaps_alignment_if_heatmaps_smaller_than_image(self):
# test alignment between images and segmaps
# here with segmaps that are smaller than the image
img = np.zeros((80, 80), dtype=np.uint8)
img[:, 30:50] = 255
img[30:50, :] = 255
img_small = ia.imresize_single_image(
img, (40, 40), interpolation="nearest")
segmaps = SegmentationMapsOnImage(
(img_small > 0).astype(np.int32), shape=(80, 80))
aug = iaa.ElasticTransformation(
alpha=60.0, sigma=4.0, mode="constant", cval=0, order=0)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
segmaps_aug = aug_det.augment_segmentation_maps([segmaps])[0]
img_aug_mask = img_aug > 255*0.1
segmaps_aug_mask = ia.imresize_single_image(
segmaps_aug.arr, (80, 80), interpolation="nearest") > 0
same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
assert segmaps_aug.shape == (80, 80)
assert segmaps_aug.arr.shape == (40, 40, 1)
assert (same / img_aug_mask.size) >= 0.94
# ---------
# unusual channel numbers
# ---------
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ElasticTransformation(alpha=2.0, sigma=2.0)
image_aug = aug(image=image)
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# ---------
# zero-sized axes
# ---------
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
for keep_size in [False, True]:
with self.subTest(shape=shape, keep_size=keep_size):
for _ in sm.xrange(3):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ElasticTransformation(alpha=2.0, sigma=2.0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# -----------
# get_parameters
# -----------
def test_get_parameters(self):
aug = iaa.ElasticTransformation(
alpha=0.25, sigma=1.0, order=2, cval=10, mode="constant")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert isinstance(params[3], iap.Deterministic)
assert isinstance(params[4], iap.Deterministic)
assert 0.25 - 1e-8 < params[0].value < 0.25 + 1e-8
assert 1.0 - 1e-8 < params[1].value < 1.0 + 1e-8
assert params[2].value == 2
assert params[3].value == 10
assert params[4].value == "constant"
# -----------
# other dtypes
# -----------
def test_other_dtypes_bool(self):
aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)
mask = np.zeros((21, 21), dtype=bool)
mask[7:13, 7:13] = True
image = np.zeros((21, 21), dtype=bool)
image[mask] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert not np.all(image_aug == 1)
assert np.any(image_aug[~mask] == 1)
def test_other_dtypes_uint_int(self):
aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)
mask = np.zeros((21, 21), dtype=bool)
mask[7:13, 7:13] = True
dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((21, 21), dtype=dtype)
image[7:13, 7:13] = max_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert not np.all(image_aug == max_value)
assert np.any(image_aug[~mask] == max_value)
def test_other_dtypes_float(self):
aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)
mask = np.zeros((21, 21), dtype=bool)
mask[7:13, 7:13] = True
for dtype in ["float16", "float32", "float64"]:
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((21, 21), dtype=dtype)
image[7:13, 7:13] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert not np.all(_isclose(image_aug, np.float128(value)))
assert np.any(_isclose(image_aug[~mask],
np.float128(value)))
def test_other_dtypes_bool_all_orders(self):
mask = np.zeros((50, 50), dtype=bool)
mask[10:40, 20:30] = True
mask[20:30, 10:40] = True
for order in [0, 1, 2, 3, 4, 5]:
aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)
image = np.zeros((50, 50), dtype=bool)
image[mask] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert not np.all(image_aug == 1)
assert np.any(image_aug[~mask] == 1)
def test_other_dtypes_uint_int_all_orders(self):
mask = np.zeros((50, 50), dtype=bool)
mask[10:40, 20:30] = True
mask[20:30, 10:40] = True
for order in [0, 1, 2, 3, 4, 5]:
aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
if order == 0:
dtypes = ["uint8", "uint16", "uint32",
"int8", "int16", "int32"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = max_value - min_value
image = np.zeros((50, 50), dtype=dtype)
image[mask] = max_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
if order == 0:
assert not np.all(image_aug == max_value)
assert np.any(image_aug[~mask] == max_value)
else:
atol = 0.1 * dynamic_range
assert not np.all(
np.isclose(image_aug,
max_value,
rtol=0, atol=atol)
)
assert np.any(
np.isclose(image_aug[~mask],
max_value,
rtol=0, atol=atol))
def test_other_dtypes_float_all_orders(self):
mask = np.zeros((50, 50), dtype=bool)
mask[10:40, 20:30] = True
mask[20:30, 10:40] = True
for order in [0, 1, 2, 3, 4, 5]:
aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
value = (
0.1 * max_value
if dtype != "float64"
else 0.0001 * max_value)
image = np.zeros((50, 50), dtype=dtype)
image[mask] = value
image_aug = aug.augment_image(image)
if order == 0:
assert image_aug.dtype.name == dtype
assert not np.all(
_isclose(image_aug, np.float128(value))
)
assert np.any(
_isclose(image_aug[~mask], np.float128(value))
)
else:
atol = (
10
if dtype == "float16"
else 0.00001 * max_value)
assert not np.all(
np.isclose(
image_aug,
np.float128(value),
rtol=0, atol=atol
))
assert np.any(
np.isclose(
image_aug[~mask],
np.float128(value),
rtol=0, atol=atol
))
class _TwoValueParam(iap.StochasticParameter):
def __init__(self, v1, v2):
super(_TwoValueParam, self).__init__()
self.v1 = v1
self.v2 = v2
def _draw_samples(self, size, random_state):
arr = np.full(size, self.v1, dtype=np.int32)
arr[1::2] = self.v2
return arr
class TestRot90(unittest.TestCase):
@property
def kp_offset(self):
# set this to -1 when using integer-based KP rotation instead of
# subpixel/float-based rotation
return 0
@property
def image(self):
return np.arange(4*4*3).reshape((4, 4, 3)).astype(np.uint8)
@property
def heatmaps(self):
return HeatmapsOnImage(self.image[..., 0:1].astype(np.float32) / 255,
shape=(4, 4, 3))
@property
def heatmaps_smaller(self):
return HeatmapsOnImage(
np.float32([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), shape=(4, 8, 3))
@property
def segmaps(self):
return SegmentationMapsOnImage(
self.image[..., 0:1].astype(np.int32), shape=(4, 4, 3))
@property
def segmaps_smaller(self):
return SegmentationMapsOnImage(
np.int32([[0, 1, 2], [3, 4, 5]]), shape=(4, 8, 3))
@property
def kpsoi(self):
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=2, y=3)]
return ia.KeypointsOnImage(kps, shape=(4, 8, 3))
@property
def psoi(self):
return ia.PolygonsOnImage(
[ia.Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])],
shape=(4, 8, 3)
)
@property
def lsoi(self):
return ia.LineStringsOnImage(
[ia.LineString([(1, 1), (3, 1), (3, 3), (1, 3)])],
shape=(4, 8, 3)
)
@property
def bbsoi(self):
return ia.BoundingBoxesOnImage(
[ia.BoundingBox(x1=1, y1=1, x2=3, y2=3)],
shape=(4, 8, 3)
)
@property
def kpsoi_k1(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_kps = [(4-2+kp_offset, 1),
(4-3+kp_offset, 2)]
kps = [ia.Keypoint(x, y) for x, y in expected_k1_kps]
return ia.KeypointsOnImage(kps, shape=(8, 4, 3))
@property
def kpsoi_k2(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_kps = self.kpsoi_k1.to_xy_array()
expected_k2_kps = [
(8-expected_k1_kps[0][1]+kp_offset, expected_k1_kps[0][0]),
(8-expected_k1_kps[1][1]+kp_offset, expected_k1_kps[1][0])]
kps = [ia.Keypoint(x, y) for x, y in expected_k2_kps]
return ia.KeypointsOnImage(kps, shape=(4, 8, 3))
@property
def kpsoi_k3(self):
# without keep size
kp_offset = self.kp_offset
expected_k2_kps = self.kpsoi_k2.to_xy_array()
expected_k3_kps = [
(4-expected_k2_kps[0][1]+kp_offset, expected_k2_kps[0][0]),
(4-expected_k2_kps[1][1]+kp_offset, expected_k2_kps[1][0])]
kps = [ia.Keypoint(x, y) for x, y in expected_k3_kps]
return ia.KeypointsOnImage(kps, shape=(8, 4, 3))
@property
def psoi_k1(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_polys = [(4-1+kp_offset, 1),
(4-1+kp_offset, 3),
(4-3+kp_offset, 3),
(4-3+kp_offset, 1)]
return ia.PolygonsOnImage([ia.Polygon(expected_k1_polys)],
shape=(8, 4, 3))
@property
def psoi_k2(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_polys = self.psoi_k1.polygons[0].exterior
expected_k2_polys = [
(8-expected_k1_polys[0][1]+kp_offset, expected_k1_polys[0][0]),
(8-expected_k1_polys[1][1]+kp_offset, expected_k1_polys[1][0]),
(8-expected_k1_polys[2][1]+kp_offset, expected_k1_polys[2][0]),
(8-expected_k1_polys[3][1]+kp_offset, expected_k1_polys[3][0])]
return ia.PolygonsOnImage([ia.Polygon(expected_k2_polys)],
shape=(4, 8, 3))
@property
def psoi_k3(self):
# without keep size
kp_offset = self.kp_offset
expected_k2_polys = self.psoi_k2.polygons[0].exterior
expected_k3_polys = [
(4-expected_k2_polys[0][1]+kp_offset, expected_k2_polys[0][0]),
(4-expected_k2_polys[1][1]+kp_offset, expected_k2_polys[1][0]),
(4-expected_k2_polys[2][1]+kp_offset, expected_k2_polys[2][0]),
(4-expected_k2_polys[3][1]+kp_offset, expected_k2_polys[3][0])]
return ia.PolygonsOnImage([ia.Polygon(expected_k3_polys)],
shape=(8, 4, 3))
@property
def lsoi_k1(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_ls = [(4-1+kp_offset, 1),
(4-1+kp_offset, 3),
(4-3+kp_offset, 3),
(4-3+kp_offset, 1)]
return ia.LineStringsOnImage([ia.LineString(expected_k1_ls)],
shape=(8, 4, 3))
@property
def lsoi_k2(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_ls = self.psoi_k1.items[0].coords
expected_k2_ls = [
(8-expected_k1_ls[0][1]+kp_offset, expected_k1_ls[0][0]),
(8-expected_k1_ls[1][1]+kp_offset, expected_k1_ls[1][0]),
(8-expected_k1_ls[2][1]+kp_offset, expected_k1_ls[2][0]),
(8-expected_k1_ls[3][1]+kp_offset, expected_k1_ls[3][0])]
return ia.LineStringsOnImage([ia.LineString(expected_k2_ls)],
shape=(4, 8, 3))
@property
def lsoi_k3(self):
# without keep size
kp_offset = self.kp_offset
expected_k2_ls = self.lsoi_k2.items[0].coords
expected_k3_ls = [
(4-expected_k2_ls[0][1]+kp_offset, expected_k2_ls[0][0]),
(4-expected_k2_ls[1][1]+kp_offset, expected_k2_ls[1][0]),
(4-expected_k2_ls[2][1]+kp_offset, expected_k2_ls[2][0]),
(4-expected_k2_ls[3][1]+kp_offset, expected_k2_ls[3][0])]
return ia.LineStringsOnImage([ia.LineString(expected_k3_ls)],
shape=(8, 4, 3))
@property
def bbsoi_k1(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_coords = [
(4-1+kp_offset, 1),
(4-3+kp_offset, 3)]
return ia.BoundingBoxesOnImage([
ia.BoundingBox(
x1=min(expected_k1_coords[0][0], expected_k1_coords[1][0]),
y1=min(expected_k1_coords[0][1], expected_k1_coords[1][1]),
x2=max(expected_k1_coords[1][0], expected_k1_coords[0][0]),
y2=max(expected_k1_coords[1][1], expected_k1_coords[0][1])
)], shape=(8, 4, 3))
@property
def bbsoi_k2(self):
# without keep size
kp_offset = self.kp_offset
coords = self.bbsoi_k1.bounding_boxes[0].coords
expected_k2_coords = [
(8-coords[0][1]+kp_offset, coords[0][0]),
(8-coords[1][1]+kp_offset, coords[1][0])]
return ia.BoundingBoxesOnImage([
ia.BoundingBox(
x1=min(expected_k2_coords[0][0], expected_k2_coords[1][0]),
y1=min(expected_k2_coords[0][1], expected_k2_coords[1][1]),
x2=max(expected_k2_coords[1][0], expected_k2_coords[0][0]),
y2=max(expected_k2_coords[1][1], expected_k2_coords[0][1])
)],
shape=(4, 8, 3))
@property
def bbsoi_k3(self):
# without keep size
kp_offset = self.kp_offset
coords = self.bbsoi_k2.bounding_boxes[0].coords
expected_k3_coords = [
(4-coords[0][1]+kp_offset, coords[0][0]),
(4-coords[1][1]+kp_offset, coords[1][0])]
return ia.BoundingBoxesOnImage([
ia.BoundingBox(
x1=min(expected_k3_coords[0][0], expected_k3_coords[1][0]),
y1=min(expected_k3_coords[0][1], expected_k3_coords[1][1]),
x2=max(expected_k3_coords[1][0], expected_k3_coords[0][0]),
y2=max(expected_k3_coords[1][1], expected_k3_coords[0][1])
)],
shape=(8, 4, 3))
def test___init___k_is_list(self):
aug = iaa.Rot90([1, 3])
assert isinstance(aug.k, iap.Choice)
assert len(aug.k.a) == 2
assert aug.k.a[0] == 1
assert aug.k.a[1] == 3
def test___init___k_is_all(self):
aug = iaa.Rot90(ia.ALL)
assert isinstance(aug.k, iap.Choice)
assert len(aug.k.a) == 4
assert aug.k.a == [0, 1, 2, 3]
def test_images_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
img_aug = aug.augment_image(self.image)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug, self.image)
def test_heatmaps_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_aug = aug.augment_heatmaps([self.heatmaps])[0]
assert (hms_aug.arr_0to1.dtype.name
== self.heatmaps.arr_0to1.dtype.name)
assert np.allclose(hms_aug.arr_0to1, self.heatmaps.arr_0to1)
assert hms_aug.shape == self.heatmaps.shape
def test_segmaps_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_aug = aug.augment_segmentation_maps(
[self.segmaps]
)[0]
assert (
segmaps_aug.arr.dtype.name
== self.segmaps.arr.dtype.name)
assert np.allclose(segmaps_aug.arr, self.segmaps.arr)
assert segmaps_aug.shape == self.segmaps.shape
def test_keypoints_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi)
def test_polygons_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi)
def test_line_strings_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi)
def test_bounding_boxes_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi)
def test_images_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
img_aug = aug.augment_image(self.image)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug,
np.rot90(self.image, 1, axes=(1, 0)))
def test_heatmaps_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_aug = aug.augment_heatmaps([self.heatmaps])[0]
assert (hms_aug.arr_0to1.dtype.name
== self.heatmaps.arr_0to1.dtype.name)
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(self.heatmaps.arr_0to1, 1, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_smaller_aug = aug.augment_heatmaps(
[self.heatmaps_smaller]
)[0]
assert (
hms_smaller_aug.arr_0to1.dtype.name
== self.heatmaps_smaller.arr_0to1.dtype.name)
assert np.allclose(
hms_smaller_aug.arr_0to1,
np.rot90(self.heatmaps_smaller.arr_0to1, 1, axes=(1, 0)))
assert hms_smaller_aug.shape == (8, 4, 3)
def test_segmaps_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_aug = aug.augment_segmentation_maps(
[self.segmaps]
)[0]
assert (
segmaps_aug.arr.dtype.name
== self.segmaps.arr.dtype.name)
assert np.allclose(
segmaps_aug.arr,
np.rot90(self.segmaps.arr, 1, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_smaller_aug = aug.augment_segmentation_maps(
self.segmaps_smaller)
assert (
segmaps_smaller_aug.arr.dtype.name
== self.segmaps_smaller.arr.dtype.name)
assert np.allclose(
segmaps_smaller_aug.arr,
np.rot90(self.segmaps_smaller.arr, 1, axes=(1, 0)))
assert segmaps_smaller_aug.shape == (8, 4, 3)
def test_keypoints_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi_k1)
def test_polygons_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi_k1)
def test_line_strings_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi_k1)
def test_bounding_boxes_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi_k1)
def test_images_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
img = self.image
img_aug = aug.augment_image(img)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug, np.rot90(img, 2, axes=(1, 0)))
def test_heatmaps_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
hms = self.heatmaps
hms_aug = aug.augment_heatmaps([hms])[0]
assert hms_aug.arr_0to1.dtype.name == hms.arr_0to1.dtype.name
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(hms.arr_0to1, 2, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
hms_smaller = self.heatmaps_smaller
hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]
assert (hms_smaller_aug.arr_0to1.dtype.name
== hms_smaller.arr_0to1.dtype.name)
assert np.allclose(
hms_smaller_aug.arr_0to1,
np.rot90(hms_smaller.arr_0to1, 2, axes=(1, 0)))
assert hms_smaller_aug.shape == (4, 8, 3)
def test_segmaps_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
segmaps = self.segmaps
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
assert segmaps_aug.arr.dtype.name == segmaps.arr.dtype.name
assert np.allclose(
segmaps_aug.arr,
np.rot90(segmaps.arr, 2, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
segmaps_smaller = self.segmaps_smaller
segmaps_smaller_aug = aug.augment_segmentation_maps(segmaps_smaller)
assert (segmaps_smaller_aug.arr.dtype.name
== segmaps_smaller.arr.dtype.name)
assert np.allclose(
segmaps_smaller_aug.arr,
np.rot90(segmaps_smaller.arr, 2, axes=(1, 0)))
assert segmaps_smaller_aug.shape == (4, 8, 3)
def test_keypoints_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi_k2)
def test_polygons_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi_k2)
def test_line_strings_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi_k2)
def test_bounding_boxes_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi_k2)
def test_images_k_is_3_and_minus1(self):
img = self.image
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
img_aug = aug.augment_image(img)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug, np.rot90(img, 3, axes=(1, 0)))
def test_heatmaps_k_is_3_and_minus1(self):
hms = self.heatmaps
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_aug = aug.augment_heatmaps([hms])[0]
assert (hms_aug.arr_0to1.dtype.name
== hms.arr_0to1.dtype.name)
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(hms.arr_0to1, 3, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_3_and_minus1(self):
hms_smaller = self.heatmaps_smaller
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]
assert (hms_smaller_aug.arr_0to1.dtype.name
== hms_smaller.arr_0to1.dtype.name)
assert np.allclose(
hms_smaller_aug.arr_0to1,
np.rot90(hms_smaller.arr_0to1, 3, axes=(1, 0)))
assert hms_smaller_aug.shape == (8, 4, 3)
def test_segmaps_k_is_3_and_minus1(self):
segmaps = self.segmaps
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
assert (segmaps_aug.arr.dtype.name
== segmaps.arr.dtype.name)
assert np.allclose(
segmaps_aug.arr,
np.rot90(segmaps.arr, 3, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_3_and_minus1(self):
segmaps_smaller = self.segmaps_smaller
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_smaller_aug = aug.augment_segmentation_maps(
segmaps_smaller)
assert (segmaps_smaller_aug.arr.dtype.name
== segmaps_smaller.arr.dtype.name)
assert np.allclose(
segmaps_smaller_aug.arr,
np.rot90(segmaps_smaller.arr, 3, axes=(1, 0)))
assert segmaps_smaller_aug.shape == (8, 4, 3)
def test_keypoints_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi_k3)
def test_polygons_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi_k3)
def test_line_strings_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi_k3)
def test_bounding_boxes_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi_k3)
def test_images_k_is_1_verify_without_using_numpy_rot90(self):
# verify once without np.rot90
aug = iaa.Rot90(k=1, keep_size=False)
image = np.uint8([[1, 0, 0],
[0, 2, 0]])
img_aug = aug.augment_image(image)
expected = np.uint8([[0, 1], [2, 0], [0, 0]])
assert np.array_equal(img_aug, expected)
def test_images_k_is_1_keep_size_is_true(self):
# keep_size=True, k=1
aug = iaa.Rot90(1, keep_size=True)
img_nonsquare = np.arange(5*4*3).reshape((5, 4, 3)).astype(np.uint8)
img_aug = aug.augment_image(img_nonsquare)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(
img_aug,
ia.imresize_single_image(
np.rot90(img_nonsquare, 1, axes=(1, 0)),
(5, 4)
)
)
def test_heatmaps_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
hms = self.heatmaps
hms_aug = aug.augment_heatmaps([hms])[0]
assert hms_aug.arr_0to1.dtype.name == hms.arr_0to1.dtype.name
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(hms.arr_0to1, 1, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
hms_smaller = self.heatmaps_smaller
hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]
hms_smaller_rot = np.rot90(hms_smaller.arr_0to1, 1, axes=(1, 0))
hms_smaller_rot = np.clip(
ia.imresize_single_image(
hms_smaller_rot, (2, 3), interpolation="cubic"
),
0.0, 1.0)
assert (hms_smaller_aug.arr_0to1.dtype.name
== hms_smaller.arr_0to1.dtype.name)
assert np.allclose(hms_smaller_aug.arr_0to1, hms_smaller_rot)
assert hms_smaller_aug.shape == (4, 8, 3)
def test_segmaps_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
segmaps = self.segmaps
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
assert (segmaps_aug.arr.dtype.name
== segmaps.arr.dtype.name)
assert np.allclose(segmaps_aug.arr,
np.rot90(segmaps.arr, 1, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
segmaps_smaller = self.segmaps_smaller
segmaps_smaller_aug = aug.augment_segmentation_maps(segmaps_smaller)
segmaps_smaller_rot = np.rot90(segmaps_smaller.arr, 1, axes=(1, 0))
segmaps_smaller_rot = ia.imresize_single_image(
segmaps_smaller_rot, (2, 3), interpolation="nearest")
assert (segmaps_smaller_aug.arr.dtype.name
== segmaps_smaller.arr.dtype.name)
assert np.allclose(segmaps_smaller_aug.arr, segmaps_smaller_rot)
assert segmaps_smaller_aug.shape == (4, 8, 3)
def test_keypoints_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
kp_offset = self.kp_offset
kpsoi = self.kpsoi
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
expected = [(4-2+kp_offset, 1), (4-3+kp_offset, 2)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
assert kpsoi_aug.shape == (4, 8, 3)
for kp_aug, kp in zip(kpsoi_aug.keypoints, expected):
assert np.allclose([kp_aug.x, kp_aug.y], [kp[0], kp[1]])
def test_polygons_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
psoi = self.psoi
kp_offset = self.kp_offset
psoi_aug = aug.augment_polygons(psoi)
expected = [(4-1+kp_offset, 1), (4-1+kp_offset, 3),
(4-3+kp_offset, 3), (4-3+kp_offset, 1)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
assert psoi_aug.shape == (4, 8, 3)
assert len(psoi_aug.polygons) == 1
assert psoi_aug.polygons[0].is_valid
assert psoi_aug.polygons[0].exterior_almost_equals(expected)
def test_line_strings_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
lsoi = self.lsoi
kp_offset = self.kp_offset
lsoi_aug = aug.augment_line_strings(lsoi)
expected = [(4-1+kp_offset, 1), (4-1+kp_offset, 3),
(4-3+kp_offset, 3), (4-3+kp_offset, 1)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
assert lsoi_aug.shape == (4, 8, 3)
assert len(lsoi_aug.items) == 1
assert lsoi_aug.items[0].coords_almost_equals(expected)
def test_bounding_boxes_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
bbsoi = self.bbsoi
kp_offset = self.kp_offset
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
expected = [(4-1+kp_offset, 1),
(4-3+kp_offset, 3)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
expected = np.float32([
[min(expected[0][0], expected[1][0]),
min(expected[0][1], expected[1][1])],
[max(expected[0][0], expected[1][0]),
max(expected[0][1], expected[1][1])]
])
assert bbsoi_aug.shape == (4, 8, 3)
assert len(bbsoi_aug.bounding_boxes) == 1
assert bbsoi_aug.bounding_boxes[0].coords_almost_equals(expected)
def test_images_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
img = self.image
imgs_aug = aug.augment_images([img] * 4)
assert np.array_equal(imgs_aug[0], np.rot90(img, 1, axes=(1, 0)))
assert np.array_equal(imgs_aug[1], np.rot90(img, 2, axes=(1, 0)))
assert np.array_equal(imgs_aug[2], np.rot90(img, 1, axes=(1, 0)))
assert np.array_equal(imgs_aug[3], np.rot90(img, 2, axes=(1, 0)))
def test_heatmaps_smaller_than_image_k_is_list(self):
def _rot_hm(hm, k):
return np.rot90(hm.arr_0to1, k, axes=(1, 0))
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
hms_smaller = self.heatmaps_smaller
hms_aug = aug.augment_heatmaps([hms_smaller] * 4)
assert hms_aug[0].shape == (8, 4, 3)
assert hms_aug[1].shape == (4, 8, 3)
assert hms_aug[2].shape == (8, 4, 3)
assert hms_aug[3].shape == (4, 8, 3)
assert np.allclose(hms_aug[0].arr_0to1, _rot_hm(hms_smaller, 1))
assert np.allclose(hms_aug[1].arr_0to1, _rot_hm(hms_smaller, 2))
assert np.allclose(hms_aug[2].arr_0to1, _rot_hm(hms_smaller, 1))
assert np.allclose(hms_aug[3].arr_0to1, _rot_hm(hms_smaller, 2))
def test_segmaps_smaller_than_image_k_is_list(self):
def _rot_sm(segmap, k):
return np.rot90(segmap.arr, k, axes=(1, 0))
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
segmaps_smaller = self.segmaps_smaller
segmaps_aug = aug.augment_segmentation_maps([segmaps_smaller] * 4)
assert segmaps_aug[0].shape == (8, 4, 3)
assert segmaps_aug[1].shape == (4, 8, 3)
assert segmaps_aug[2].shape == (8, 4, 3)
assert segmaps_aug[3].shape == (4, 8, 3)
assert np.allclose(segmaps_aug[0].arr, _rot_sm(segmaps_smaller, 1))
assert np.allclose(segmaps_aug[1].arr, _rot_sm(segmaps_smaller, 2))
assert np.allclose(segmaps_aug[2].arr, _rot_sm(segmaps_smaller, 1))
assert np.allclose(segmaps_aug[3].arr, _rot_sm(segmaps_smaller, 2))
def test_keypoints_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
kpsoi = self.kpsoi
kpsoi_aug = aug.augment_keypoints([kpsoi] * 4)
assert_cbaois_equal(kpsoi_aug[0], self.kpsoi_k1)
assert_cbaois_equal(kpsoi_aug[1], self.kpsoi_k2)
assert_cbaois_equal(kpsoi_aug[2], self.kpsoi_k1)
assert_cbaois_equal(kpsoi_aug[3], self.kpsoi_k2)
def test_polygons_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
psoi = self.psoi
psoi_aug = aug.augment_polygons([psoi] * 4)
assert_cbaois_equal(psoi_aug[0], self.psoi_k1)
assert_cbaois_equal(psoi_aug[1], self.psoi_k2)
assert_cbaois_equal(psoi_aug[2], self.psoi_k1)
assert_cbaois_equal(psoi_aug[3], self.psoi_k2)
def test_line_strings_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
lsoi = self.lsoi
lsoi_aug = aug.augment_line_strings([lsoi] * 4)
assert_cbaois_equal(lsoi_aug[0], self.lsoi_k1)
assert_cbaois_equal(lsoi_aug[1], self.lsoi_k2)
assert_cbaois_equal(lsoi_aug[2], self.lsoi_k1)
assert_cbaois_equal(lsoi_aug[3], self.lsoi_k2)
def test_bounding_boxes_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
bbsoi = self.bbsoi
bbsoi_aug = aug.augment_bounding_boxes([bbsoi] * 4)
assert_cbaois_equal(bbsoi_aug[0], self.bbsoi_k1)
assert_cbaois_equal(bbsoi_aug[1], self.bbsoi_k2)
assert_cbaois_equal(bbsoi_aug[2], self.bbsoi_k1)
assert_cbaois_equal(bbsoi_aug[3], self.bbsoi_k2)
def test_empty_keypoints(self):
aug = iaa.Rot90(k=1, keep_size=False)
kpsoi = ia.KeypointsOnImage([], shape=(4, 8, 3))
kpsoi_aug = aug.augment_keypoints(kpsoi)
expected = self.kpsoi_k1
expected.keypoints = []
assert_cbaois_equal(kpsoi_aug, expected)
def test_empty_polygons(self):
aug = iaa.Rot90(k=1, keep_size=False)
psoi = ia.PolygonsOnImage([], shape=(4, 8, 3))
psoi_aug = aug.augment_polygons(psoi)
expected = self.psoi_k1
expected.polygons = []
assert_cbaois_equal(psoi_aug, expected)
def test_empty_line_strings(self):
aug = iaa.Rot90(k=1, keep_size=False)
lsoi = ia.LineStringsOnImage([], shape=(4, 8, 3))
lsoi_aug = aug.augment_line_strings(lsoi)
expected = self.lsoi_k1
expected.line_strings = []
assert_cbaois_equal(lsoi_aug, expected)
def test_empty_bounding_boxes(self):
aug = iaa.Rot90(k=1, keep_size=False)
bbsoi = ia.BoundingBoxesOnImage([], shape=(4, 8, 3))
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
expected = self.bbsoi_k1
expected.bounding_boxes = []
assert_cbaois_equal(bbsoi_aug, expected)
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90(k=1)
image_aug = aug(image=image)
shape_expected = tuple([shape[1], shape[0]] + list(shape[2:]))
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape_expected
def test_zero_sized_axes_k_0_or_2(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
for keep_size in [False, True]:
with self.subTest(shape=shape, keep_size=keep_size):
for _ in sm.xrange(10):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90([0, 2], keep_size=keep_size)
image_aug = aug(image=image)
assert image_aug.shape == shape
def test_zero_sized_axes_k_1_or_3_no_keep_size(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
for _ in sm.xrange(10):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90([1, 3], keep_size=False)
image_aug = aug(image=image)
shape_expected = tuple([shape[1], shape[0]]
+ list(shape[2:]))
assert image_aug.shape == shape_expected
def test_zero_sized_axes_k_1_or_3_keep_size(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
for _ in sm.xrange(10):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90([1, 3], keep_size=True)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
def test_get_parameters(self):
aug = iaa.Rot90([1, 3], keep_size=False)
assert aug.get_parameters()[0] == aug.k
assert aug.get_parameters()[1] is False
def test_other_dtypes_bool(self):
aug = iaa.Rot90(2)
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug[0, 0] == 0)
assert np.all(image_aug[2, 2] == 1)
def test_other_dtypes_uint_int(self):
aug = iaa.Rot90(2)
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = max_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug[0, 0] == 0)
assert np.all(image_aug[2, 2] == max_value)
def test_other_dtypes_float(self):
aug = iaa.Rot90(2)
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
def _allclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.allclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0, 1.0, 10.0, 100.0, 500 ** (isize-1), 1000 ** (isize-1)]
values = values + [(-1) * value for value in values]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert _allclose(image_aug[0, 0], 0)
assert _allclose(image_aug[2, 2], np.float128(value))
|
#!/usr/bin/env python
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import argparse
import grp
import json
import logging
import os.path
import re
import socket
import stat
import subprocess
import sys
def run(*args):
"""Wrapper to Popen."""
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stdout, stderr
def nic_available(interface):
"""Check if specified network interface is available."""
try:
subprocess.check_call([settings.ifconfig, interface],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return True
except subprocess.CalledProcessError:
return False
def rt_available(rt_table):
"""Check if specified routing table is defined."""
try:
subprocess.check_call([settings.ip, "route", "list", "table", rt_table],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return True
except subprocess.CalledProcessError:
return False
def vpn_status():
"""Gets current VPN status."""
ret = {}
for line in run(settings.openvpn, "status")[0].split("\n"):
x = re.search("'(?P<vpn>\\w+)'\\ is\\ (?P<running>not)?", line)
if x:
ret[x.group("vpn")] = x.group("running") != "not"
return ret
def vpn_enable(name):
"""Start a VPN."""
run(settings.openvpn, "start", name)
def vpn_disable(name):
"""Stop a running VPN."""
run(settings.openvpn, "stop", name)
def forward_drop():
"""Disable any and all forwarding unless explicitly said so."""
run(settings.iptables, "-P", "FORWARD", "DROP")
def enable_nat(interface):
"""Enable NAT on this interface."""
run(settings.iptables, "-t", "nat", "-A", "POSTROUTING",
"-o", interface, "-j", "MASQUERADE")
def disable_nat(interface):
"""Disable NAT on this interface."""
run(settings.iptables, "-t", "nat", "-D", "POSTROUTING",
"-o", interface, "-j", "MASQUERADE")
def init_rttable(rt_table, interface):
"""Initialise routing table for this interface using routes
from main table."""
if rt_table in ["local", "main", "default"]:
return
stdout, _ = run(settings.ip, "route", "list", "dev", interface)
for line in stdout.split("\n"):
args = ["route", "add"] + [x for x in line.split(" ") if x]
args += ["dev", interface, "table", rt_table]
run(settings.ip, *args)
def flush_rttable(rt_table):
"""Flushes specified routing table entries."""
if rt_table in ["local", "main", "default"]:
return
run(settings.ip, "route", "flush", "table", rt_table)
def forward_enable(src, dst, ipaddr):
"""Enable forwarding a specific IP address from one interface into
another."""
run(settings.iptables, "-A", "FORWARD", "-i", src, "-o", dst,
"--source", ipaddr, "-j", "ACCEPT")
run(settings.iptables, "-A", "FORWARD", "-i", dst, "-o", src,
"--destination", ipaddr, "-j", "ACCEPT")
def forward_disable(src, dst, ipaddr):
"""Disable forwarding of a specific IP address from one interface into
another."""
run(settings.iptables, "-D", "FORWARD", "-i", src, "-o", dst,
"--source", ipaddr, "-j", "ACCEPT")
run(settings.iptables, "-D", "FORWARD", "-i", dst, "-o", src,
"--destination", ipaddr, "-j", "ACCEPT")
def srcroute_enable(rt_table, ipaddr):
"""Enable routing policy for specified source IP address."""
run(settings.ip, "rule", "add", "from", ipaddr, "table", rt_table)
run(settings.ip, "route", "flush", "cache")
def srcroute_disable(rt_table, ipaddr):
"""Disable routing policy for specified source IP address."""
run(settings.ip, "rule", "del", "from", ipaddr, "table", rt_table)
run(settings.ip, "route", "flush", "cache")
handlers = {
"nic_available": nic_available,
"rt_available": rt_available,
"vpn_status": vpn_status,
"vpn_enable": vpn_enable,
"vpn_disable": vpn_disable,
"forward_drop": forward_drop,
"enable_nat": enable_nat,
"disable_nat": disable_nat,
"init_rttable": init_rttable,
"flush_rttable": flush_rttable,
"forward_enable": forward_enable,
"forward_disable": forward_disable,
"srcroute_enable": srcroute_enable,
"srcroute_disable": srcroute_disable,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("socket", nargs="?", default="/tmp/cuckoo-rooter",
help="Unix socket path")
parser.add_argument("-g", "--group", default="cuckoo",
help="Unix socket group")
parser.add_argument("--ifconfig", default="/sbin/ifconfig",
help="Path to ifconfig")
parser.add_argument("--openvpn", default="service",
help="Command or init script path to run OpenVPN")
parser.add_argument("--iptables", default="/sbin/iptables",
help="Path to iptables")
parser.add_argument("--ip", default="/sbin/ip", help="Path to ip")
parser.add_argument("-v", "--verbose", action="store_true",
help="Enable verbose logging")
settings = parser.parse_args()
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("cuckoo-rooter")
if not settings.openvpn or not os.path.exists(settings.openvpn):
sys.exit("OpenVPN binary is not available, please configure!")
if not settings.ifconfig or not os.path.exists(settings.ifconfig):
sys.exit("The `ifconfig` binary is not available, eh?!")
if not settings.iptables or not os.path.exists(settings.iptables):
sys.exit("The `iptables` binary is not available, eh?!")
if os.getuid():
sys.exit("This utility is supposed to be ran as root.")
if os.path.exists(settings.socket):
os.remove(settings.socket)
server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
server.bind(settings.socket)
# Provide the correct file ownership and permission so Cuckoo can use it
# from an unprivileged process, based on Sean Whalen's routetor.
try:
gr = grp.getgrnam(settings.group)
except KeyError:
sys.exit(
"The group (`%s`) does not exist. Please define the group / user "
"through which Cuckoo will connect to the rooter, e.g., "
"./utils/rooter.py -g myuser" % settings.group
)
os.chown(settings.socket, 0, gr.gr_gid)
os.chmod(settings.socket, stat.S_IRUSR | stat.S_IWUSR | stat.S_IWGRP)
while True:
command, addr = server.recvfrom(4096)
try:
obj = json.loads(command)
except:
log.info("Received invalid request: %r", command)
continue
command = obj.get("command")
args = obj.get("args", [])
kwargs = obj.get("kwargs", {})
if not isinstance(command, basestring) or command not in handlers:
log.info("Received incorrect command: %r", command)
continue
if not isinstance(args, (tuple, list)):
log.info("Invalid arguments type: %r", args)
continue
if not isinstance(kwargs, dict):
log.info("Invalid keyword arguments: %r", kwargs)
continue
for arg in args + kwargs.keys() + kwargs.values():
if not isinstance(arg, basestring):
log.info("Invalid argument detected: %r", arg)
break
else:
if settings.verbose:
log.info(
"Processing command: %s %s %s", command,
" ".join(args),
" ".join("%s=%s" % (k, v) for k, v in kwargs.items())
)
output = e = None
try:
output = handlers[command](*args, **kwargs)
except Exception as e:
log.exception("Error executing command")
server.sendto(json.dumps({
"output": output,
"exception": str(e) if e else None,
}), addr)
further refine the rooter.py script
Although it removes the cross-compatibility with CentOS this change adds
an error message that users could follow on CentOS-based systems.
Note that the CentOS (or Ubuntu/Debian) support was broken due to the
"service" binary not being an actual file, usually; something that
rooter.py looks for during startup. Anyway, should be fine now.
#!/usr/bin/env python
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import argparse
import grp
import json
import logging
import os.path
import re
import socket
import stat
import subprocess
import sys
def run(*args):
"""Wrapper to Popen."""
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stdout, stderr
def nic_available(interface):
"""Check if specified network interface is available."""
try:
subprocess.check_call([settings.ifconfig, interface],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return True
except subprocess.CalledProcessError:
return False
def rt_available(rt_table):
"""Check if specified routing table is defined."""
try:
subprocess.check_call([settings.ip, "route", "list", "table", rt_table],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return True
except subprocess.CalledProcessError:
return False
def vpn_status():
"""Gets current VPN status."""
ret = {}
for line in run(settings.service, "openvpn", "status")[0].split("\n"):
x = re.search("'(?P<vpn>\\w+)'\\ is\\ (?P<running>not)?", line)
if x:
ret[x.group("vpn")] = x.group("running") != "not"
return ret
def vpn_enable(name):
"""Start a VPN."""
run(settings.service, "openvpn", "start", name)
def vpn_disable(name):
"""Stop a running VPN."""
run(settings.service, "openvpn", "stop", name)
def forward_drop():
"""Disable any and all forwarding unless explicitly said so."""
run(settings.iptables, "-P", "FORWARD", "DROP")
def enable_nat(interface):
"""Enable NAT on this interface."""
run(settings.iptables, "-t", "nat", "-A", "POSTROUTING",
"-o", interface, "-j", "MASQUERADE")
def disable_nat(interface):
"""Disable NAT on this interface."""
run(settings.iptables, "-t", "nat", "-D", "POSTROUTING",
"-o", interface, "-j", "MASQUERADE")
def init_rttable(rt_table, interface):
"""Initialise routing table for this interface using routes
from main table."""
if rt_table in ["local", "main", "default"]:
return
stdout, _ = run(settings.ip, "route", "list", "dev", interface)
for line in stdout.split("\n"):
args = ["route", "add"] + [x for x in line.split(" ") if x]
args += ["dev", interface, "table", rt_table]
run(settings.ip, *args)
def flush_rttable(rt_table):
"""Flushes specified routing table entries."""
if rt_table in ["local", "main", "default"]:
return
run(settings.ip, "route", "flush", "table", rt_table)
def forward_enable(src, dst, ipaddr):
"""Enable forwarding a specific IP address from one interface into
another."""
run(settings.iptables, "-A", "FORWARD", "-i", src, "-o", dst,
"--source", ipaddr, "-j", "ACCEPT")
run(settings.iptables, "-A", "FORWARD", "-i", dst, "-o", src,
"--destination", ipaddr, "-j", "ACCEPT")
def forward_disable(src, dst, ipaddr):
"""Disable forwarding of a specific IP address from one interface into
another."""
run(settings.iptables, "-D", "FORWARD", "-i", src, "-o", dst,
"--source", ipaddr, "-j", "ACCEPT")
run(settings.iptables, "-D", "FORWARD", "-i", dst, "-o", src,
"--destination", ipaddr, "-j", "ACCEPT")
def srcroute_enable(rt_table, ipaddr):
"""Enable routing policy for specified source IP address."""
run(settings.ip, "rule", "add", "from", ipaddr, "table", rt_table)
run(settings.ip, "route", "flush", "cache")
def srcroute_disable(rt_table, ipaddr):
"""Disable routing policy for specified source IP address."""
run(settings.ip, "rule", "del", "from", ipaddr, "table", rt_table)
run(settings.ip, "route", "flush", "cache")
handlers = {
"nic_available": nic_available,
"rt_available": rt_available,
"vpn_status": vpn_status,
"vpn_enable": vpn_enable,
"vpn_disable": vpn_disable,
"forward_drop": forward_drop,
"enable_nat": enable_nat,
"disable_nat": disable_nat,
"init_rttable": init_rttable,
"flush_rttable": flush_rttable,
"forward_enable": forward_enable,
"forward_disable": forward_disable,
"srcroute_enable": srcroute_enable,
"srcroute_disable": srcroute_disable,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("socket", nargs="?", default="/tmp/cuckoo-rooter",
help="Unix socket path")
parser.add_argument("-g", "--group", default="cuckoo",
help="Unix socket group")
parser.add_argument("--ifconfig", default="/sbin/ifconfig",
help="Path to ifconfig")
parser.add_argument("--service", default="/usr/sbin/service",
help="Service wrapper script for invoking OpenVPN")
parser.add_argument("--iptables", default="/sbin/iptables",
help="Path to iptables")
parser.add_argument("--ip", default="/sbin/ip", help="Path to ip")
parser.add_argument("-v", "--verbose", action="store_true",
help="Enable verbose logging")
settings = parser.parse_args()
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("cuckoo-rooter")
if not settings.service or not os.path.exists(settings.service):
sys.exit(
"The service binary is not available, please configure it!\n"
"Note that on CentOS you should provide --service /sbin/service, "
"rather than using the Ubuntu/Debian default /usr/sbin/service."
)
if not settings.ifconfig or not os.path.exists(settings.ifconfig):
sys.exit("The `ifconfig` binary is not available, eh?!")
if not settings.iptables or not os.path.exists(settings.iptables):
sys.exit("The `iptables` binary is not available, eh?!")
if os.getuid():
sys.exit("This utility is supposed to be ran as root.")
if os.path.exists(settings.socket):
os.remove(settings.socket)
server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
server.bind(settings.socket)
# Provide the correct file ownership and permission so Cuckoo can use it
# from an unprivileged process, based on Sean Whalen's routetor.
try:
gr = grp.getgrnam(settings.group)
except KeyError:
sys.exit(
"The group (`%s`) does not exist. Please define the group / user "
"through which Cuckoo will connect to the rooter, e.g., "
"./utils/rooter.py -g myuser" % settings.group
)
os.chown(settings.socket, 0, gr.gr_gid)
os.chmod(settings.socket, stat.S_IRUSR | stat.S_IWUSR | stat.S_IWGRP)
while True:
command, addr = server.recvfrom(4096)
try:
obj = json.loads(command)
except:
log.info("Received invalid request: %r", command)
continue
command = obj.get("command")
args = obj.get("args", [])
kwargs = obj.get("kwargs", {})
if not isinstance(command, basestring) or command not in handlers:
log.info("Received incorrect command: %r", command)
continue
if not isinstance(args, (tuple, list)):
log.info("Invalid arguments type: %r", args)
continue
if not isinstance(kwargs, dict):
log.info("Invalid keyword arguments: %r", kwargs)
continue
for arg in args + kwargs.keys() + kwargs.values():
if not isinstance(arg, basestring):
log.info("Invalid argument detected: %r", arg)
break
else:
if settings.verbose:
log.info(
"Processing command: %s %s %s", command,
" ".join(args),
" ".join("%s=%s" % (k, v) for k, v in kwargs.items())
)
output = e = None
try:
output = handlers[command](*args, **kwargs)
except Exception as e:
log.exception("Error executing command")
server.sendto(json.dumps({
"output": output,
"exception": str(e) if e else None,
}), addr)
|
import torch
import hyperchamber as hc
import numpy as np
import inspect
from operator import itemgetter
from hypergan.train_hooks.base_train_hook import BaseTrainHook
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from torch.autograd import grad as torch_grad
class AdversarialNormTrainHook(BaseTrainHook):
def __init__(self, gan=None, config=None, trainer=None):
super().__init__(config=config, gan=gan, trainer=trainer)
self.d_loss = None
self.g_loss = None
if self.config.gamma is not None:
self.gamma = torch.Tensor([self.config.gamma]).float()[0].cuda()#self.gan.configurable_param(self.config.gamma or 1.0)
if self.config.gammas is not None:
self.gammas = [
torch.Tensor([self.config.gammas[0]]).float()[0].cuda(),#self.gan.configurable_param(self.config.gamma or 1.0)
torch.Tensor([self.config.gammas[1]]).float()[0].cuda()#self.gan.configurable_param(self.config.gamma or 1.0)
]
self.relu = torch.nn.ReLU()
self.target = [Parameter(x, requires_grad=True) for x in self.gan.discriminator_real_inputs()]
self.x_mod_target = torch.zeros_like(self.target[0])
self.g_mod_target = torch.zeros_like(self.target[0])
def forward(self, d_loss, g_loss):
if self.config.mode == "real" or self.config.mode is None:
for target, data in zip(self.target, self.gan.discriminator_real_inputs()):
target.data = data.clone()
d_fake = self.gan.d_fake
d_real = self.gan.forward_discriminator(self.target)
loss, _, mod_target = self.regularize_adversarial_norm(d_fake, d_real, self.target)
norm = self.relu(self.config.offset-((mod_target[0] - self.gan.x) ** 2))
elif self.config.mode == "fake":
for target, data in zip(self.target, self.gan.discriminator_fake_inputs()):
target.data = data.clone().detach()
d_fake = self.gan.forward_discriminator(self.target)
d_real = self.gan.d_real
loss, norm, mod_target = self.regularize_adversarial_norm(d_real, d_fake, self.target)
norm = self.relu(self.config.offset-((mod_target[0] - self.gan.g) ** 2))
if self.config.loss:
if "g" in self.config.loss:
self.g_loss = self.gammas[1] * g_norm.mean()
self.gan.add_metric('an_g', self.g_loss)
if "d" in self.config.loss:
self.d_loss = self.gammas[0] * d_norm.mean()
self.gan.add_metric('an_d', self.d_loss)
if "dg" in self.config.loss:
self.d_loss = self.gammas[0] * d_norm.mean()
self.gan.add_metric('an_d', self.d_loss)
self.g_loss = self.gammas[1] * norm.mean()
self.gan.add_metric('an_g', self.g_loss)
else:
self.d_loss = self.gammas[0] * d_norm.mean()
self.gan.add_metric('an_d', self.d_loss)
return [self.d_loss, self.g_loss]
def regularize_adversarial_norm(self, d1_logits, d2_logits, target):
loss = self.gan.loss.forward_adversarial_norm(d1_logits, d2_logits)
d1_grads = torch_grad(outputs=loss, inputs=target, retain_graph=True, create_graph=True)
d1_norm = [torch.norm(_d1_grads.view(-1),p=2,dim=0) for _d1_grads in d1_grads]
reg_d1 = d1_norm[0]
for d1 in d1_norm[1:]:
reg_d1 = reg_d1 + d1
mod_target = [_d1 + _t for _d1, _t in zip(d1_grads, target)]
return torch_grad(outputs=loss, inputs=target, retain_graph=True, create_graph=True)
[fix] adversarial norm formulation
import torch
import hyperchamber as hc
import numpy as np
import inspect
from operator import itemgetter
from hypergan.train_hooks.base_train_hook import BaseTrainHook
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from torch.autograd import grad as torch_grad
class AdversarialNormTrainHook(BaseTrainHook):
def __init__(self, gan=None, config=None, trainer=None):
super().__init__(config=config, gan=gan, trainer=trainer)
self.d_loss = None
self.g_loss = None
if self.config.gamma is not None:
self.gamma = torch.Tensor([self.config.gamma]).float()[0].cuda()#self.gan.configurable_param(self.config.gamma or 1.0)
if self.config.gammas is not None:
self.gammas = [
torch.Tensor([self.config.gammas[0]]).float()[0].cuda(),#self.gan.configurable_param(self.config.gamma or 1.0)
torch.Tensor([self.config.gammas[1]]).float()[0].cuda()#self.gan.configurable_param(self.config.gamma or 1.0)
]
self.relu = torch.nn.ReLU()
self.target = [Parameter(x, requires_grad=True) for x in self.gan.discriminator_real_inputs()]
self.x_mod_target = torch.zeros_like(self.target[0])
self.g_mod_target = torch.zeros_like(self.target[0])
def forward(self, d_loss, g_loss):
if self.config.mode == "real" or self.config.mode is None:
for target, data in zip(self.target, self.gan.discriminator_real_inputs()):
target.data = data.clone()
d_fake = self.gan.d_fake
d_real = self.gan.forward_discriminator(self.target)
loss, _, mod_target = self.regularize_adversarial_norm(d_fake, d_real, self.target)
norm = self.relu(self.config.offset-((mod_target[0] - self.gan.x) ** 2))
elif self.config.mode == "fake":
for target, data in zip(self.target, self.gan.discriminator_fake_inputs()):
target.data = data.clone()
d_fake = self.gan.forward_discriminator(self.target)
d_real = self.gan.d_real
loss, norm, mod_target = self.regularize_adversarial_norm(d_real, d_fake, self.target)
norm = self.relu(self.config.offset-((mod_target[0] - self.gan.g) ** 2))
if loss is None:
return [None, None]
if self.config.loss:
if "g" in self.config.loss:
self.g_loss = self.gamma * norm.mean()
self.gan.add_metric('an_g', self.g_loss)
if "d" in self.config.loss:
self.d_loss = self.gamma * norm.mean()
self.gan.add_metric('an_d', self.d_loss)
if "dg" in self.config.loss:
self.d_loss = self.gammas[0] * norm.mean()
self.gan.add_metric('an_d', self.d_loss)
self.g_loss = self.gammas[1] * norm.mean()
self.gan.add_metric('an_g', self.g_loss)
else:
self.d_loss = self.gamma * norm.mean()
self.gan.add_metric('an_d', self.d_loss)
return [self.d_loss, self.g_loss]
def regularize_adversarial_norm(self, d1_logits, d2_logits, target):
loss = self.gan.loss.forward_adversarial_norm(d1_logits, d2_logits)
if loss == 0:
return [None, None, None]
d1_grads = torch_grad(outputs=loss, inputs=target, retain_graph=True, create_graph=True)
d1_norm = [torch.norm(_d1_grads.view(-1),p=2,dim=0) for _d1_grads in d1_grads]
reg_d1 = d1_norm[0]
for d1 in d1_norm[1:]:
reg_d1 = reg_d1 + d1
mod_target = [_d1 + _t for _d1, _t in zip(d1_grads, target)]
return loss, reg_d1, mod_target
|
from __future__ import absolute_import, division
import workflows.services
import workflows.services.sample_producer
import mock
import pytest
import Queue
def test_service_can_be_looked_up():
'''Attempt to look up the service by its name'''
service_class = workflows.services.lookup('Producer')
assert service_class == workflows.services.sample_producer.Producer
@pytest.mark.skip(reason="broken test, skip for now")
def test_service_registers_idle_timer():
pass
# service._register_idle(10, idle_trigger)
# cmd_queue = mock.Mock()
# cmd_queue.get.side_effect = [
# Queue.Empty(),
# { 'channel': 'command',
# 'payload': workflows.services.Commands.SHUTDOWN },
# AssertionError('Not observing commands') ]
# fe_queue = Queue.Queue()
# idle_trigger = mock.Mock()
#
# # Create service
# service = workflows.services.Service(
# commands=cmd_queue, frontend=fe_queue)
# service._register_idle(10, idle_trigger)
#
# # Start service
# service.start()
#
# # Check trigger has been called
# idle_trigger.assert_called_once_with()
#
# # Check startup/shutdown sequence
# assert cmd_queue.get.call_count == 2
# assert cmd_queue.get.call_args == ((True, 10),)
# messages = []
# while not fe_queue.empty():
# message = fe_queue.get_nowait()
# if 'statuscode' in message:
# messages.append(message['statuscode'])
# assert messages == [
# service.SERVICE_STATUS_NEW,
# service.SERVICE_STATUS_STARTING,
# service.SERVICE_STATUS_IDLE,
# service.SERVICE_STATUS_TIMER,
# service.SERVICE_STATUS_IDLE,
# service.SERVICE_STATUS_PROCESSING,
# service.SERVICE_STATUS_SHUTDOWN,
# service.SERVICE_STATUS_END,
# ]
Improve test coverage
from __future__ import absolute_import, division
import workflows.services
import workflows.services.sample_producer
import mock
import pytest
def test_service_can_be_looked_up():
'''Attempt to look up the service by its name'''
service_class = workflows.services.lookup('Producer')
assert service_class == workflows.services.sample_producer.Producer
def test_service_registers_idle_timer():
'''Check that the service registers an idle event handler.'''
p = workflows.services.sample_producer.Producer()
mock_idlereg = mock.Mock()
setattr(p, '_register_idle', mock_idlereg)
p.initializing()
mock_idlereg.assert_called_once_with(mock.ANY, p.create_message)
def test_service_produces_messages():
'''Check that the producer produces messages in the idle event handler.'''
p = workflows.services.sample_producer.Producer()
mock_transport = mock.Mock()
setattr(p, '_transport', mock_transport)
p.initializing()
assert not mock_transport.send.called
p.create_message()
mock_transport.send.assert_called_once()
p.create_message()
assert mock_transport.send.call_count == 2
calls = mock_transport.send.call_args_list
assert calls[0][0][0] == calls[1][0][0] # same destination
assert calls[0][0][1] != calls[1][0][1] # different message
|
from troposphere import Template, Select, Ref, Parameter, FindInMap, Output, Base64, Join, GetAtt
import troposphere.iam as iam
import troposphere.ec2 as ec2
import troposphere.autoscaling as autoscaling
import troposphere.cloudformation as cf
import hashlib
import json
import boto
import time
import boto.s3
from boto.s3.key import Key
from datetime import datetime
class EnvironmentBase():
'''
EnvironmentBase encapsulates functionality required to build and deploy a network and common resources for object storage within a specified region
'''
def __init__(self, arg_dict):
'''
Init method for environment base creates all common objects for a given environment within the CloudFormation template including a network, s3 bucket and requisite policies to allow ELB Access log aggregation and CloudTrail log storage
@param arg_dict [dict] keyword arguments to handle setting config-level parameters and arguments within this class
'''
self.globals=arg_dict.get('global', {})
self.manual_parameter_bindings = {}
template=arg_dict.get('template', {})
with open(self.globals.get('strings_path', 'strings.json'), 'r') as f:
json_data = f.read()
self.strings = json.loads(json_data)
self.template = Template()
self.template_args = arg_dict.get('template', {})
self.template.description = template.get('description', 'No Description Specified')
self.subnets = {}
self.add_common_parameters(template)
self.add_ami_mapping(ami_map_file_path=template.get('ami_map_file', 'ami_cache.json'))
def add_common_parameters(self, template_config):
'''
Adds common parameters for instance creation to the CloudFormation template
@param template_config [dict] collection of template-level configuration values to drive the setup of this method
'''
if 'ec2Key' not in self.template.parameters:
self.template.add_parameter(Parameter('ec2Key',
Type='String',
Default=template_config.get('ec2_key_default','default-key'),
Description='Name of an existing EC2 KeyPair to enable SSH access to the instances',
AllowedPattern="[\\x20-\\x7E]*",
MinLength=1,
MaxLength=255,
ConstraintDescription='can only contain ASCII chacacters.'))
if 'remoteAccessLocation' not in self.template.parameters:
self.remote_access_cidr = self.template.add_parameter(Parameter('remoteAccessLocation',
Description='CIDR block identifying the network address space that will be allowed to ingress into public access points within this solution',
Type='String',
Default='0.0.0.0/0',
MinLength=9,
MaxLength=18,
AllowedPattern='(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})',
ConstraintDescription='must be a valid CIDR range of the form x.x.x.x/x'))
def add_region_map_value(self,
region,
key,
value):
'''
Method adds a key value pair to the RegionMap mapping within this CloudFormation template
@param region [string] AWS region name that the key value pair is associated with
@param key [string] name of the key to store in the RegionMap mapping for the specified Region
@param value [string] value portion of the key value pair related to the region specified
'''
self.__init_region_map([region])
if region not in self.template.mappings['RegionMap']:
self.template.mappings['RegionMap'][region] = {}
self.template.mappings['RegionMap'][region][key] = value
def get_cloudtrail_logging_bucket_policy_document(self,
utility_bucket,
cloudtrail_log_prefix='cloudtrail_logs'):
'''
Method builds the S3 bucket policy statements which will allow the proper AWS account ids to write CloudTrail logs to the specified bucket and prefix.
Per documentation located at: http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html
@param utility_bucket [Troposphere.s3.Bucket] object reference of the utility bucket for this tier
@param cloudtrail_log_prefix [string] s3 key name prefix to prepend to the path where CloudTrail will store logs
'''
if cloudtrail_log_prefix != None and cloudtrail_log_prefix != '':
cloudtrail_log_prefix = cloudtrail_log_prefix + '/'
else:
cloudtrail_log_prefix = ''
statements = [{"Action" : ["s3:GetBucketAcl"],
"Resource" : Join('', ["arn:aws:s3:::", Ref(utility_bucket)]),
"Effect" : "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::903692715234:root",
"arn:aws:iam::859597730677:root",
"arn:aws:iam::814480443879:root",
"arn:aws:iam::216624486486:root",
"arn:aws:iam::086441151436:root",
"arn:aws:iam::388731089494:root",
"arn:aws:iam::284668455005:root",
"arn:aws:iam::113285607260:root"]}},
{"Action" : ["s3:PutObject"],
"Resource": Join('', ["arn:aws:s3:::", Ref(utility_bucket), '/', cloudtrail_log_prefix + "AWSLogs/", Ref("AWS::AccountId"), '/*']),
"Effect" : "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::903692715234:root",
"arn:aws:iam::859597730677:root",
"arn:aws:iam::814480443879:root",
"arn:aws:iam::216624486486:root",
"arn:aws:iam::086441151436:root",
"arn:aws:iam::388731089494:root",
"arn:aws:iam::284668455005:root",
"arn:aws:iam::113285607260:root"]},
"Condition": {"StringEquals" : {"s3:x-amz-acl": "bucket-owner-full-control"}}}]
self.template.add_output(Output('cloudTrailLoggingBucketAndPath',
Value=Join('',['arn:aws:s3:::', Ref(utility_bucket), cloudtrail_log_prefix]),
Description='S3 bucket and key name prefix to use when configuring CloudTrail to aggregate logs to S3'))
return {"Statement": statements}
def get_elb_logging_bucket_policy_document(self,
utility_bucket,
elb_log_prefix='elb_logs'):
'''
Method builds the S3 bucket policy statements which will allow the proper AWS account ids to write ELB Access Logs to the specified bucket and prefix.
Per documentation located at: http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/configure-s3-bucket.html
@param utility_bucket [Troposphere.s3.Bucket] object reference of the utility bucket for this tier
@param elb_log_prefix [string] prefix for paths used to prefix the path where ELB will place access logs
'''
if elb_log_prefix != None and elb_log_prefix != '':
elb_log_prefix = elb_log_prefix + '/'
else:
elb_log_prefix = ''
elb_accts = {'us-west-1': '027434742980',
'us-west-2': '797873946194',
'us-east-1': '127311923021',
'eu-west-1': '156460612806',
'ap-northeast-1': '582318560864',
'ap-southeast-1': '114774131450',
'ap-southeast-2': '783225319266',
'sa-east-1': '507241528517',
'us-gov-west-1': '048591011584'}
for region in elb_accts:
self.add_region_map_value(region, 'elbAccountId', elb_accts[region])
statements = [{"Action" : ["s3:PutObject"],
"Effect" : "Allow",
"Resource" : Join('', ['arn:aws:s3:::', Ref(utility_bucket), '/', elb_log_prefix + 'AWSLogs/', Ref('AWS::AccountId'), '/*']),
"Principal" : {"AWS": [FindInMap('RegionMap', Ref('AWS::Region'), 'elbAccountId')]}}]
self.template.add_output(Output('elbAccessLoggingBucketAndPath',
Value=Join('',['arn:aws:s3:::', Ref(utility_bucket), elb_log_prefix]),
Description='S3 bucket and key name prefix to use when configuring elb access logs to aggregate to S3'))
return {"Statement":statements}
def add_ami_mapping(self,
ami_map_file_path='ami_cache.json'):
'''
Method gets the ami cache from the file locally and adds a mapping for ami ids per region into the template
This depdns on populating ami_cache.json with the AMI ids that are output by the packer scripts per region
@param ami_map_file [string] path representing where to find the AMI map to ingest into this template
'''
with open(ami_map_file_path, 'r') as json_file:
json_data = json.load(json_file)
for region in json_data:
for key in json_data[region]:
self.add_region_map_value(region, key, json_data[region][key])
def create_asg(self,
layer_name,
instance_profile,
instance_type=None,
ami_name='ubuntu1404LtsAmiId',
ec2_key=None,
user_data=None,
default_instance_type=None,
security_groups=None,
min_size=1,
max_size=1,
root_volume_size=24,
include_ephemerals=True,
number_ephemeral_vols=2,
ebs_data_volumes=None, #[{'size':'100', 'type':'gp2', 'delete_on_termination': True, 'iops': 4000, 'volume_type': 'io1'}]
custom_tags=None,
load_balancer=None,
instance_monitoring=False,
subnet_type='private'):
'''
Wrapper method used to create an EC2 Launch Configuration and Auto Scaling group
@param layer_name [string] friendly name of the set of instances being created - will be set as the name for instances deployed
@param instance_profile [Troposphere.iam.InstanceProfile] IAM Instance Profile object to be applied to instances launched within this Auto Scaling group
@param instance_type [Troposphere.Parameter | string] Reference to the AWS EC2 Instance Type to deploy.
@param ami_name [string] Name of the AMI to deploy as defined within the RegionMap lookup for the deployed region
@param ec2_key [Troposphere.Parameter | Troposphere.Ref(Troposphere.Parameter)] Input parameter used to gather the name of the EC2 key to use to secure access to instances launched within this Auto Scaling group
@param user_data [string[]] Array of strings (lines of bash script) to be set as the user data as a bootstrap script for instances launched within this Auto Scaling group
@param default_instance_type [string - AWS Instance Type] AWS instance type to set as the default for the input parameter defining the instance type for this layer_name
@param security_groups [Troposphere.ec2.SecurityGroup[]] array of security groups to be applied to instances within this Auto Scaling group
@param min_size [int] value to set as the minimum number of instances for the Auto Scaling group
@param max_size [int] value to set as the maximum number of instances for the Auto Scaling group
@param root_volume_size [int] size (in GiB) to assign to the root volume of the launched instance
@param include_ephemerals [Boolean] indicates that ephemeral volumes should be included in the block device mapping of the Launch Configuration
@param number_ephemeral_vols [int] number of ephemeral volumes to attach within the block device mapping Launch Configuration
@param ebs_data_volumes [list] dictionary pair of size and type data properties in a list used to create ebs volume attachments
@param custom_tags [Troposphere.autoscaling.Tag[]] Collection of Auto Scaling tags to be assigned to the Auto Scaling Group
@param load_balancer [Troposphere.elasticloadbalancing.LoadBalancer] Object reference to an ELB to be assigned to this auto scaling group
@param instance_monitoring [Boolean] indicates that detailed monitoring should be turned on for all instnaces launched within this Auto Scaling group
@param subnet_type [string {'public', 'private'}] string indicating which type of subnet (public or private) instances should be launched into
'''
if subnet_type not in ['public', 'private']:
raise RuntimeError('Unable to determine which type of subnet instances should be launched into. ' + str(subnet_type) + ' is not one of ["public", "private"].')
if ec2_key != None and type(ec2_key) != Parameter:
ec2_key = Ref(ec2_key)
else:
ec2_key = Ref(self.template.parameters['ec2Key'])
if default_instance_type == None:
default_instance_type = 'm1.small'
if instance_type == None or type(instance_type) == str:
instance_type = self.template.add_parameter(Parameter(layer_name + 'InstanceType',
Type='String',
Default=default_instance_type,
Description='Instance type for instances launched within the ' + layer_name + ' auto scaling group',
AllowedValues=self.strings['valid_instance_types'],
ConstraintDescription=self.strings['valid_instance_type_message']))
sg_list = []
for sg in security_groups:
if isinstance(sg, Ref):
sg_list.append(sg)
else:
sg_list.append(Ref(sg))
launch_config_obj = autoscaling.LaunchConfiguration(layer_name + 'LaunchConfiguration',
IamInstanceProfile=Ref(instance_profile),
ImageId=FindInMap('RegionMap', Ref('AWS::Region'), ami_name),
InstanceType=Ref(instance_type),
SecurityGroups=sg_list,
KeyName=ec2_key,
UserData=user_data,
InstanceMonitoring=instance_monitoring)
block_devices = [ec2.BlockDeviceMapping(
DeviceName='/dev/sda1',
Ebs=ec2.EBSBlockDevice(
VolumeSize=root_volume_size))]
device_names = []
for i, c in enumerate('bcdefghijklmnopqrstuvwxyz'):
device_names.append('/dev/sd' + c)
if ebs_data_volumes != None and len(ebs_data_volumes) > 0:
for ebs_volume in ebs_data_volumes:
device_name = device_names.pop()
ebs_block_device = ec2.EBSBlockDevice(
DeleteOnTermination=ebs_volume.get('delete_on_termination', True),
VolumeSize=ebs_volume.get('size', '100'),
VolumeType=ebs_volume.get('type', 'gp2'))
if 'iops' in ebs_volume:
ebs_block_device.Iops = int(ebs_volume.get('iops'))
if 'snapshot_id' in ebs_volume:
ebs_block_device.SnapshotId = ebs_volume.get('snapshot_id')
block_devices.append(ec2.BlockDeviceMapping(
DeviceName = device_name,
Ebs = ebs_block_device))
if include_ephemerals and number_ephemeral_vols > 0:
device_names.reverse()
for x in range(0, number_ephemeral_vols):
device_name = device_names.pop()
block_devices.append(ec2.BlockDeviceMapping(
DeviceName= device_name,
VirtualName= 'ephemeral' + str(x)))
if len(block_devices) > 0:
launch_config_obj.BlockDeviceMappings = block_devices
launch_config = self.template.add_resource(launch_config_obj)
auto_scaling_obj = autoscaling.AutoScalingGroup(layer_name + 'AutoScalingGroup',
AvailabilityZones=self.azs,
LaunchConfigurationName=Ref(launch_config),
MaxSize=max_size,
MinSize=min_size,
DesiredCapacity=min(min_size, max_size),
VPCZoneIdentifier=self.subnets[subnet_type.lower()])
if load_balancer != None:
auto_scaling_obj.LoadBalancerNames = [Ref(load_balancer)]
if custom_tags != None and len(custom_tags) > 0:
if type(custom_tags) != list:
custom_tags = [custom_tags]
auto_scaling_obj.Tags = custom_tags
else:
auto_scaling_obj.Tags = []
auto_scaling_obj.Tags.append(autoscaling.Tag('Name', layer_name, True))
return self.template.add_resource(auto_scaling_obj)
def __init_region_map(self,
region_list):
'''
Internal helper method used to check to ensure mapping dictionaries are present
@param region_list [list(str)] array of strings representing the names of the regions to validate and/or create within the RegionMap CloudFormation mapping
'''
if 'RegionMap' not in self.template.mappings:
self.template.mappings['RegionMap'] = {}
for region_name in region_list:
if region_name not in self.template.mappings['RegionMap']:
self.template.mappings['RegionMap'][region_name] = {}
def create_reciprocal_sg(self,
source_group,
source_group_name,
destination_group,
destination_group_name,
from_port,
to_port=None,
ip_protocol='tcp'):
'''
Helper method creates reciprocal ingress and egress rules given two existing security groups and a set of ports
@param source_group [Troposphere.ec2.SecurityGroup] Object reference to the source security group
@param source_group_name [string] friendly name of the source security group used for labels
@param destination_group [Troposphere.ec2.SecurityGroup] Object reference to the destination security group
@param destination_group_name [string] friendly name of the destination security group used for labels
@param from_port [string] lower boundary of the port range to set for the secuirty group rules
@param to_port [string] upper boundary of the port range to set for the security group rules
@param ip_protocol [string] name of the IP protocol to set this rule for
'''
if to_port == None:
to_port = from_port
if isinstance(from_port, unicode):
from_port = from_port.encode('ascii', 'ignore')
if isinstance(to_port, unicode):
to_port = to_port.encode('ascii', 'ignore')
if from_port == to_port:
if isinstance(from_port, str):
label_suffix = ip_protocol.capitalize() + from_port
else:
label_suffix = ip_protocol.capitalize() + 'Mapped'
else:
if isinstance(from_port, str) and isinstance(to_port, str):
label_suffix = ip_protocol.capitalize() + from_port + 'To' + to_port
else:
label_suffix = ip_protocol.capitalize() + 'MappedPorts'
self.template.add_resource(ec2.SecurityGroupIngress(destination_group_name + 'Ingress' + source_group_name + label_suffix,
SourceSecurityGroupId=Ref(source_group),
GroupId=Ref(destination_group),
FromPort=from_port,
ToPort=to_port,
IpProtocol=ip_protocol))
self.template.add_resource(ec2.SecurityGroupEgress(source_group_name + 'Egress' + destination_group_name + label_suffix,
DestinationSecurityGroupId=Ref(destination_group),
GroupId=Ref(source_group),
FromPort=from_port,
ToPort=to_port,
IpProtocol=ip_protocol))
def to_json(self):
'''
Centralized method for managing outputting this template with a timestamp identifying when it was generated and for creating a SHA256 hash representing the template for validation purposes
'''
if 'dateGenerated' not in self.template.outputs:
self.template.add_output(Output('dateGenerated',
Value=str(datetime.utcnow()),
Description='UTC datetime representation of when this template was generated'))
if 'templateValidationHash' not in self.template.outputs:
m = hashlib.sha256()
m.update(EnvironmentBase.__validation_formatter(self.template))
self.template.add_output(Output('templateValidationHash',
Value=m.hexdigest(),
Description='Hash of this template that can be used as a simple means of validating whether a template has been changed since it was generated.'))
return self.template.to_json()
@staticmethod
def validate_template_file(cloudformation_template_path,
validation_output_name='templateValidationHash'):
'''
Method takes a file path, reads it and validates the template via the SHA256 checksum that is to be located within the Outputs collection of the cloudFormation template
@param cloudformation_template_path [string] path from which to read the cloudformation template
@param validation_output_name [string] name of the output to use to gather the SHA256 hash to validate
'''
with open(cloudformation_template_path, 'r') as f:
cf_template_contents = f.read()
return EnvironmentBase.validate_template_contents(cf_template_contents, validation_output_name)
@staticmethod
def build_bootstrap(bootstrap_files,
variable_declarations=None,
cleanup_commands=None):
'''
Method encapsulates process of building out the bootstrap given a set of variables and a bootstrap file to source from
Returns base 64-wrapped, joined bootstrap to be applied to an instnace
@param bootstrap_files [ string[] ] list of paths to the bash script(s) to read as the source for the bootstrap action to created
@param variable_declaration [ list ] list of lines to add to the head of the file - used to inject bash variables into the script
@param cleanup_commnds [ string[] ] list of lines to add at the end of the file - used for layer-specific details
'''
ret_val = ['#!/bin/bash']
if variable_declarations != None:
for line in variable_declarations:
ret_val.append(line)
for bootstrap_file in bootstrap_files:
for line in EnvironmentBase.get_file_contents(bootstrap_file):
ret_val.append(line)
if cleanup_commands != None:
for line in cleanup_commands:
ret_val.append(line)
return Base64(Join("\n", ret_val))
@staticmethod
def get_file_contents(file_name):
'''
Method encpsulates reading a file into a list while removing newline characters
@param file_name [string] path to file to read
'''
ret_val = []
with open(file_name) as f:
content = f.readlines()
for line in content:
if not line.startswith('#~'):
ret_val.append(line.replace("\n", ""))
return ret_val
@staticmethod
def __validation_formatter(cf_template):
'''
Validation formatter helps to ensure consistent formatting for hash validation workflow
@param json_string [string | Troposphere.Template | dict] JSON-able data to be formatted for validation
'''
if type(cf_template) == Template:
json_string = json.dumps(json.loads(cf_template.to_json()))
elif type(cf_template) == dict:
json_string = json.dumps(cf_template)
return json.dumps(json.loads(json_string), separators=(',',':'))
@staticmethod
def validate_template_contents(cloudformation_template_string,
validation_output_name='templateValidationHash'):
'''
Method takes the contents of a CloudFormation template and validates the SHA256 hash
@param cloudformation_template_string [string] string contents of the CloudFormation template to validate
@param validation_output_name [string] name of the CloudFormation output containing the SHA256 hash to be validated
'''
template_object = json.loads(cloudformation_template_string)
if 'Outputs' in template_object:
if validation_output_name in template_object['Outputs']:
if 'Value' in template_object['Outputs'][validation_output_name]:
hash_to_validate = template_object['Outputs'][validation_output_name]['Value']
del template_object['Outputs'][validation_output_name]
m = hashlib.sha256()
m.update(EnvironmentBase.__validation_formatter(template_object))
template_hash = m.hexdigest()
print '* hash to validate: ' + hash_to_validate
print '* calculated hash: ' + template_hash
if hash_to_validate == template_hash:
print 'Template is valid'
else:
raise RuntimeError('Template hash is not valid')
else:
print 'Cannot validate this template as it appears it is corrupt. The [' + validation_output_name + '] output does not contain a value property.'
else:
print 'Cannot validate this template as it does not contain the specified output [' + validation_output_name + '] - check to make sure this is the right name and try again.'
else:
print 'This template does not contain a collection of outputs. Please check the input template and try again.'
def create_instance_profile(self,
layer_name,
iam_policies=None):
'''
Helper method creates an IAM Role and Instance Profile for the optoinally specified IAM policies
@param layer_name [string] friendly name for the Role and Instance Profile used for naming and path organization
@param iam_policies [Troposphere.iam.Policy[]] array of IAM Policies to be associated with the Role and Instance Profile created
'''
iam_role_obj = iam.Role(layer_name + 'IAMRole',
AssumeRolePolicyDocument={
'Statement': [{
'Effect': 'Allow',
'Principal': {'Service': ['ec2.amazonaws.com']},
'Action': ['sts:AssumeRole']
}]},
Path=Join('',['/' + self.globals.get('environment_name', 'environmentbase') + '/', layer_name , '/']))
if iam_policies != None:
iam_role_obj.Policies = iam_policies
iam_role = self.template.add_resource(iam_role_obj)
return self.template.add_resource(iam.InstanceProfile(layer_name + 'InstancePolicy',
Path='/' + self.globals.get('environment_name', 'environmentbase') + '/',
Roles=[Ref(iam_role)]))
def add_child_template(self,
name,
template,
s3_bucket=None,
s3_key_prefix=None,
s3_canned_acl=None):
'''
Method adds a child template to this object's template and binds the child template parameters to properties, resources and other stack outputs
@param name [str] name of this template for key naming in s3
@param template [Troposphere.Template] Troposphere Template object to add as a child to this object's template
@param template_args [dict] key-value pair of configuration values for templates to apply to this operation
@param s3_bucket [str] name of the bucket to upload keys to - will default to value in template_args if not present
@param s3_key_prefix [str] s3 key name prefix to prepend to s3 key path - will default to value in template_args if not present
@param s3_canned_acl [str] name of the s3 canned acl to apply to templates uploaded to S3 - will default to value in template_args if not present
'''
key_serial = str(int(time.time()))
if s3_bucket == None:
s3_bucket = self.template_args.get('s3_bucket')
if s3_bucket == None:
raise RuntimeError('Cannot upload template to s3 as a s3 bucket was not specified nor set as a default')
if s3_key_prefix == None:
s3_key_prefix = self.template_args.get('s3_key_name_prefix', '')
if s3_key_prefix == None:
s3_key_name = '/' + name + '.' + key_serial + '.template'
else:
s3_key_name = s3_key_prefix + '/' + name + '.' + key_serial + '.template'
if s3_canned_acl == None:
s3_canned_acl = self.template_args.get('s3_canned_acl', 'private')
if self.template_args.get('mock_upload',False):
stack_url = 'http://www.dualspark.com'
else:
conn = boto.connect_s3()
bucket = conn.get_bucket(s3_bucket)
key = Key(bucket)
key.key = s3_key_name
key.set_contents_from_string(template.to_json())
key.set_acl(s3_canned_acl)
stack_url = key.generate_url(expires_in=0, query_auth=False)
if name not in self.stack_outputs:
self.stack_outputs[name] = []
stack_params = {}
for parameter in template.parameters.keys():
if parameter in self.manual_parameter_bindings:
stack_params[parameter] = self.manual_parameter_bindings[parameter]
elif parameter.startswith('availabilityZone'):
stack_params[parameter] = GetAtt('privateSubnet' + parameter.replace('availabilityZone',''), 'AvailabilityZone')
elif parameter in self.template.parameters.keys():
stack_params[parameter] = Ref(self.template.parameters.get(parameter))
elif parameter in self.template.resources.keys():
stack_params[parameter] = Ref(self.template.resources.get(parameter))
elif parameter in self.stack_outputs:
stack_params[parameter] = GetAtt(self.stack_outputs[parameter], 'Outputs.' + parameter)
else:
stack_params[parameter] = Ref(self.template.add_parameter(template.parameters[parameter]))
stack_name = name + 'Stack'
for output in template.outputs:
if output not in self.stack_outputs:
self.stack_outputs[output] = stack_name
else:
raise RuntimeError('Cannot add child stack with output named ' + output + ' as it was already added by stack named ' + self.stack_outputs[output])
self.template.add_resource(cf.Stack(stack_name,
TemplateURL=stack_url,
Parameters=stack_params,
TimeoutInMinutes=self.template_args.get('timeout_in_minutes', '60')))
if __name__ == '__main__':
import json
with open('config_args.json', 'r') as f:
cmd_args = json.loads(f.read())
test = EnvironmentBase(cmd_args)
print test.to_json()
minor list comprehension tweak
from troposphere import Template, Select, Ref, Parameter, FindInMap, Output, Base64, Join, GetAtt
import troposphere.iam as iam
import troposphere.ec2 as ec2
import troposphere.autoscaling as autoscaling
import troposphere.cloudformation as cf
import hashlib
import json
import boto
import time
import boto.s3
from boto.s3.key import Key
from datetime import datetime
class EnvironmentBase():
'''
EnvironmentBase encapsulates functionality required to build and deploy a network and common resources for object storage within a specified region
'''
def __init__(self, arg_dict):
'''
Init method for environment base creates all common objects for a given environment within the CloudFormation template including a network, s3 bucket and requisite policies to allow ELB Access log aggregation and CloudTrail log storage
@param arg_dict [dict] keyword arguments to handle setting config-level parameters and arguments within this class
'''
self.globals=arg_dict.get('global', {})
self.manual_parameter_bindings = {}
template=arg_dict.get('template', {})
with open(self.globals.get('strings_path', 'strings.json'), 'r') as f:
json_data = f.read()
self.strings = json.loads(json_data)
self.template = Template()
self.template_args = arg_dict.get('template', {})
self.template.description = template.get('description', 'No Description Specified')
self.subnets = {}
self.add_common_parameters(template)
self.add_ami_mapping(ami_map_file_path=template.get('ami_map_file', 'ami_cache.json'))
def add_common_parameters(self, template_config):
'''
Adds common parameters for instance creation to the CloudFormation template
@param template_config [dict] collection of template-level configuration values to drive the setup of this method
'''
if 'ec2Key' not in self.template.parameters:
self.template.add_parameter(Parameter('ec2Key',
Type='String',
Default=template_config.get('ec2_key_default','default-key'),
Description='Name of an existing EC2 KeyPair to enable SSH access to the instances',
AllowedPattern="[\\x20-\\x7E]*",
MinLength=1,
MaxLength=255,
ConstraintDescription='can only contain ASCII chacacters.'))
if 'remoteAccessLocation' not in self.template.parameters:
self.remote_access_cidr = self.template.add_parameter(Parameter('remoteAccessLocation',
Description='CIDR block identifying the network address space that will be allowed to ingress into public access points within this solution',
Type='String',
Default='0.0.0.0/0',
MinLength=9,
MaxLength=18,
AllowedPattern='(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})',
ConstraintDescription='must be a valid CIDR range of the form x.x.x.x/x'))
def add_region_map_value(self,
region,
key,
value):
'''
Method adds a key value pair to the RegionMap mapping within this CloudFormation template
@param region [string] AWS region name that the key value pair is associated with
@param key [string] name of the key to store in the RegionMap mapping for the specified Region
@param value [string] value portion of the key value pair related to the region specified
'''
self.__init_region_map([region])
if region not in self.template.mappings['RegionMap']:
self.template.mappings['RegionMap'][region] = {}
self.template.mappings['RegionMap'][region][key] = value
def get_cloudtrail_logging_bucket_policy_document(self,
utility_bucket,
cloudtrail_log_prefix='cloudtrail_logs'):
'''
Method builds the S3 bucket policy statements which will allow the proper AWS account ids to write CloudTrail logs to the specified bucket and prefix.
Per documentation located at: http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html
@param utility_bucket [Troposphere.s3.Bucket] object reference of the utility bucket for this tier
@param cloudtrail_log_prefix [string] s3 key name prefix to prepend to the path where CloudTrail will store logs
'''
if cloudtrail_log_prefix != None and cloudtrail_log_prefix != '':
cloudtrail_log_prefix = cloudtrail_log_prefix + '/'
else:
cloudtrail_log_prefix = ''
statements = [{"Action" : ["s3:GetBucketAcl"],
"Resource" : Join('', ["arn:aws:s3:::", Ref(utility_bucket)]),
"Effect" : "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::903692715234:root",
"arn:aws:iam::859597730677:root",
"arn:aws:iam::814480443879:root",
"arn:aws:iam::216624486486:root",
"arn:aws:iam::086441151436:root",
"arn:aws:iam::388731089494:root",
"arn:aws:iam::284668455005:root",
"arn:aws:iam::113285607260:root"]}},
{"Action" : ["s3:PutObject"],
"Resource": Join('', ["arn:aws:s3:::", Ref(utility_bucket), '/', cloudtrail_log_prefix + "AWSLogs/", Ref("AWS::AccountId"), '/*']),
"Effect" : "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::903692715234:root",
"arn:aws:iam::859597730677:root",
"arn:aws:iam::814480443879:root",
"arn:aws:iam::216624486486:root",
"arn:aws:iam::086441151436:root",
"arn:aws:iam::388731089494:root",
"arn:aws:iam::284668455005:root",
"arn:aws:iam::113285607260:root"]},
"Condition": {"StringEquals" : {"s3:x-amz-acl": "bucket-owner-full-control"}}}]
self.template.add_output(Output('cloudTrailLoggingBucketAndPath',
Value=Join('',['arn:aws:s3:::', Ref(utility_bucket), cloudtrail_log_prefix]),
Description='S3 bucket and key name prefix to use when configuring CloudTrail to aggregate logs to S3'))
return {"Statement": statements}
def get_elb_logging_bucket_policy_document(self,
utility_bucket,
elb_log_prefix='elb_logs'):
'''
Method builds the S3 bucket policy statements which will allow the proper AWS account ids to write ELB Access Logs to the specified bucket and prefix.
Per documentation located at: http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/configure-s3-bucket.html
@param utility_bucket [Troposphere.s3.Bucket] object reference of the utility bucket for this tier
@param elb_log_prefix [string] prefix for paths used to prefix the path where ELB will place access logs
'''
if elb_log_prefix != None and elb_log_prefix != '':
elb_log_prefix = elb_log_prefix + '/'
else:
elb_log_prefix = ''
elb_accts = {'us-west-1': '027434742980',
'us-west-2': '797873946194',
'us-east-1': '127311923021',
'eu-west-1': '156460612806',
'ap-northeast-1': '582318560864',
'ap-southeast-1': '114774131450',
'ap-southeast-2': '783225319266',
'sa-east-1': '507241528517',
'us-gov-west-1': '048591011584'}
for region in elb_accts:
self.add_region_map_value(region, 'elbAccountId', elb_accts[region])
statements = [{"Action" : ["s3:PutObject"],
"Effect" : "Allow",
"Resource" : Join('', ['arn:aws:s3:::', Ref(utility_bucket), '/', elb_log_prefix + 'AWSLogs/', Ref('AWS::AccountId'), '/*']),
"Principal" : {"AWS": [FindInMap('RegionMap', Ref('AWS::Region'), 'elbAccountId')]}}]
self.template.add_output(Output('elbAccessLoggingBucketAndPath',
Value=Join('',['arn:aws:s3:::', Ref(utility_bucket), elb_log_prefix]),
Description='S3 bucket and key name prefix to use when configuring elb access logs to aggregate to S3'))
return {"Statement":statements}
def add_ami_mapping(self,
ami_map_file_path='ami_cache.json'):
'''
Method gets the ami cache from the file locally and adds a mapping for ami ids per region into the template
This depdns on populating ami_cache.json with the AMI ids that are output by the packer scripts per region
@param ami_map_file [string] path representing where to find the AMI map to ingest into this template
'''
with open(ami_map_file_path, 'r') as json_file:
json_data = json.load(json_file)
for region in json_data:
for key in json_data[region]:
self.add_region_map_value(region, key, json_data[region][key])
def create_asg(self,
layer_name,
instance_profile,
instance_type=None,
ami_name='ubuntu1404LtsAmiId',
ec2_key=None,
user_data=None,
default_instance_type=None,
security_groups=None,
min_size=1,
max_size=1,
root_volume_size=24,
include_ephemerals=True,
number_ephemeral_vols=2,
ebs_data_volumes=None, #[{'size':'100', 'type':'gp2', 'delete_on_termination': True, 'iops': 4000, 'volume_type': 'io1'}]
custom_tags=None,
load_balancer=None,
instance_monitoring=False,
subnet_type='private'):
'''
Wrapper method used to create an EC2 Launch Configuration and Auto Scaling group
@param layer_name [string] friendly name of the set of instances being created - will be set as the name for instances deployed
@param instance_profile [Troposphere.iam.InstanceProfile] IAM Instance Profile object to be applied to instances launched within this Auto Scaling group
@param instance_type [Troposphere.Parameter | string] Reference to the AWS EC2 Instance Type to deploy.
@param ami_name [string] Name of the AMI to deploy as defined within the RegionMap lookup for the deployed region
@param ec2_key [Troposphere.Parameter | Troposphere.Ref(Troposphere.Parameter)] Input parameter used to gather the name of the EC2 key to use to secure access to instances launched within this Auto Scaling group
@param user_data [string[]] Array of strings (lines of bash script) to be set as the user data as a bootstrap script for instances launched within this Auto Scaling group
@param default_instance_type [string - AWS Instance Type] AWS instance type to set as the default for the input parameter defining the instance type for this layer_name
@param security_groups [Troposphere.ec2.SecurityGroup[]] array of security groups to be applied to instances within this Auto Scaling group
@param min_size [int] value to set as the minimum number of instances for the Auto Scaling group
@param max_size [int] value to set as the maximum number of instances for the Auto Scaling group
@param root_volume_size [int] size (in GiB) to assign to the root volume of the launched instance
@param include_ephemerals [Boolean] indicates that ephemeral volumes should be included in the block device mapping of the Launch Configuration
@param number_ephemeral_vols [int] number of ephemeral volumes to attach within the block device mapping Launch Configuration
@param ebs_data_volumes [list] dictionary pair of size and type data properties in a list used to create ebs volume attachments
@param custom_tags [Troposphere.autoscaling.Tag[]] Collection of Auto Scaling tags to be assigned to the Auto Scaling Group
@param load_balancer [Troposphere.elasticloadbalancing.LoadBalancer] Object reference to an ELB to be assigned to this auto scaling group
@param instance_monitoring [Boolean] indicates that detailed monitoring should be turned on for all instnaces launched within this Auto Scaling group
@param subnet_type [string {'public', 'private'}] string indicating which type of subnet (public or private) instances should be launched into
'''
if subnet_type not in ['public', 'private']:
raise RuntimeError('Unable to determine which type of subnet instances should be launched into. ' + str(subnet_type) + ' is not one of ["public", "private"].')
if ec2_key != None and type(ec2_key) != Parameter:
ec2_key = Ref(ec2_key)
else:
ec2_key = Ref(self.template.parameters['ec2Key'])
if default_instance_type == None:
default_instance_type = 'm1.small'
if instance_type == None or type(instance_type) == str:
instance_type = self.template.add_parameter(Parameter(layer_name + 'InstanceType',
Type='String',
Default=default_instance_type,
Description='Instance type for instances launched within the ' + layer_name + ' auto scaling group',
AllowedValues=self.strings['valid_instance_types'],
ConstraintDescription=self.strings['valid_instance_type_message']))
sg_list = []
for sg in security_groups:
if isinstance(sg, Ref):
sg_list.append(sg)
else:
sg_list.append(Ref(sg))
launch_config_obj = autoscaling.LaunchConfiguration(layer_name + 'LaunchConfiguration',
IamInstanceProfile=Ref(instance_profile),
ImageId=FindInMap('RegionMap', Ref('AWS::Region'), ami_name),
InstanceType=Ref(instance_type),
SecurityGroups=sg_list,
KeyName=ec2_key,
UserData=user_data,
InstanceMonitoring=instance_monitoring)
block_devices = [ec2.BlockDeviceMapping(
DeviceName='/dev/sda1',
Ebs=ec2.EBSBlockDevice(
VolumeSize=root_volume_size))]
device_names = ['/dev/sd%s' % c for c in 'bcdefghijklmnopqrstuvwxyz']
if ebs_data_volumes != None and len(ebs_data_volumes) > 0:
for ebs_volume in ebs_data_volumes:
device_name = device_names.pop()
ebs_block_device = ec2.EBSBlockDevice(
DeleteOnTermination=ebs_volume.get('delete_on_termination', True),
VolumeSize=ebs_volume.get('size', '100'),
VolumeType=ebs_volume.get('type', 'gp2'))
if 'iops' in ebs_volume:
ebs_block_device.Iops = int(ebs_volume.get('iops'))
if 'snapshot_id' in ebs_volume:
ebs_block_device.SnapshotId = ebs_volume.get('snapshot_id')
block_devices.append(ec2.BlockDeviceMapping(
DeviceName = device_name,
Ebs = ebs_block_device))
if include_ephemerals and number_ephemeral_vols > 0:
device_names.reverse()
for x in range(0, number_ephemeral_vols):
device_name = device_names.pop()
block_devices.append(ec2.BlockDeviceMapping(
DeviceName= device_name,
VirtualName= 'ephemeral' + str(x)))
if len(block_devices) > 0:
launch_config_obj.BlockDeviceMappings = block_devices
launch_config = self.template.add_resource(launch_config_obj)
auto_scaling_obj = autoscaling.AutoScalingGroup(layer_name + 'AutoScalingGroup',
AvailabilityZones=self.azs,
LaunchConfigurationName=Ref(launch_config),
MaxSize=max_size,
MinSize=min_size,
DesiredCapacity=min(min_size, max_size),
VPCZoneIdentifier=self.subnets[subnet_type.lower()])
if load_balancer != None:
auto_scaling_obj.LoadBalancerNames = [Ref(load_balancer)]
if custom_tags != None and len(custom_tags) > 0:
if type(custom_tags) != list:
custom_tags = [custom_tags]
auto_scaling_obj.Tags = custom_tags
else:
auto_scaling_obj.Tags = []
auto_scaling_obj.Tags.append(autoscaling.Tag('Name', layer_name, True))
return self.template.add_resource(auto_scaling_obj)
def __init_region_map(self,
region_list):
'''
Internal helper method used to check to ensure mapping dictionaries are present
@param region_list [list(str)] array of strings representing the names of the regions to validate and/or create within the RegionMap CloudFormation mapping
'''
if 'RegionMap' not in self.template.mappings:
self.template.mappings['RegionMap'] = {}
for region_name in region_list:
if region_name not in self.template.mappings['RegionMap']:
self.template.mappings['RegionMap'][region_name] = {}
def create_reciprocal_sg(self,
source_group,
source_group_name,
destination_group,
destination_group_name,
from_port,
to_port=None,
ip_protocol='tcp'):
'''
Helper method creates reciprocal ingress and egress rules given two existing security groups and a set of ports
@param source_group [Troposphere.ec2.SecurityGroup] Object reference to the source security group
@param source_group_name [string] friendly name of the source security group used for labels
@param destination_group [Troposphere.ec2.SecurityGroup] Object reference to the destination security group
@param destination_group_name [string] friendly name of the destination security group used for labels
@param from_port [string] lower boundary of the port range to set for the secuirty group rules
@param to_port [string] upper boundary of the port range to set for the security group rules
@param ip_protocol [string] name of the IP protocol to set this rule for
'''
if to_port == None:
to_port = from_port
if isinstance(from_port, unicode):
from_port = from_port.encode('ascii', 'ignore')
if isinstance(to_port, unicode):
to_port = to_port.encode('ascii', 'ignore')
if from_port == to_port:
if isinstance(from_port, str):
label_suffix = ip_protocol.capitalize() + from_port
else:
label_suffix = ip_protocol.capitalize() + 'Mapped'
else:
if isinstance(from_port, str) and isinstance(to_port, str):
label_suffix = ip_protocol.capitalize() + from_port + 'To' + to_port
else:
label_suffix = ip_protocol.capitalize() + 'MappedPorts'
self.template.add_resource(ec2.SecurityGroupIngress(destination_group_name + 'Ingress' + source_group_name + label_suffix,
SourceSecurityGroupId=Ref(source_group),
GroupId=Ref(destination_group),
FromPort=from_port,
ToPort=to_port,
IpProtocol=ip_protocol))
self.template.add_resource(ec2.SecurityGroupEgress(source_group_name + 'Egress' + destination_group_name + label_suffix,
DestinationSecurityGroupId=Ref(destination_group),
GroupId=Ref(source_group),
FromPort=from_port,
ToPort=to_port,
IpProtocol=ip_protocol))
def to_json(self):
'''
Centralized method for managing outputting this template with a timestamp identifying when it was generated and for creating a SHA256 hash representing the template for validation purposes
'''
if 'dateGenerated' not in self.template.outputs:
self.template.add_output(Output('dateGenerated',
Value=str(datetime.utcnow()),
Description='UTC datetime representation of when this template was generated'))
if 'templateValidationHash' not in self.template.outputs:
m = hashlib.sha256()
m.update(EnvironmentBase.__validation_formatter(self.template))
self.template.add_output(Output('templateValidationHash',
Value=m.hexdigest(),
Description='Hash of this template that can be used as a simple means of validating whether a template has been changed since it was generated.'))
return self.template.to_json()
@staticmethod
def validate_template_file(cloudformation_template_path,
validation_output_name='templateValidationHash'):
'''
Method takes a file path, reads it and validates the template via the SHA256 checksum that is to be located within the Outputs collection of the cloudFormation template
@param cloudformation_template_path [string] path from which to read the cloudformation template
@param validation_output_name [string] name of the output to use to gather the SHA256 hash to validate
'''
with open(cloudformation_template_path, 'r') as f:
cf_template_contents = f.read()
return EnvironmentBase.validate_template_contents(cf_template_contents, validation_output_name)
@staticmethod
def build_bootstrap(bootstrap_files,
variable_declarations=None,
cleanup_commands=None):
'''
Method encapsulates process of building out the bootstrap given a set of variables and a bootstrap file to source from
Returns base 64-wrapped, joined bootstrap to be applied to an instnace
@param bootstrap_files [ string[] ] list of paths to the bash script(s) to read as the source for the bootstrap action to created
@param variable_declaration [ list ] list of lines to add to the head of the file - used to inject bash variables into the script
@param cleanup_commnds [ string[] ] list of lines to add at the end of the file - used for layer-specific details
'''
ret_val = ['#!/bin/bash']
if variable_declarations != None:
for line in variable_declarations:
ret_val.append(line)
for bootstrap_file in bootstrap_files:
for line in EnvironmentBase.get_file_contents(bootstrap_file):
ret_val.append(line)
if cleanup_commands != None:
for line in cleanup_commands:
ret_val.append(line)
return Base64(Join("\n", ret_val))
@staticmethod
def get_file_contents(file_name):
'''
Method encpsulates reading a file into a list while removing newline characters
@param file_name [string] path to file to read
'''
ret_val = []
with open(file_name) as f:
content = f.readlines()
for line in content:
if not line.startswith('#~'):
ret_val.append(line.replace("\n", ""))
return ret_val
@staticmethod
def __validation_formatter(cf_template):
'''
Validation formatter helps to ensure consistent formatting for hash validation workflow
@param json_string [string | Troposphere.Template | dict] JSON-able data to be formatted for validation
'''
if type(cf_template) == Template:
json_string = json.dumps(json.loads(cf_template.to_json()))
elif type(cf_template) == dict:
json_string = json.dumps(cf_template)
return json.dumps(json.loads(json_string), separators=(',',':'))
@staticmethod
def validate_template_contents(cloudformation_template_string,
validation_output_name='templateValidationHash'):
'''
Method takes the contents of a CloudFormation template and validates the SHA256 hash
@param cloudformation_template_string [string] string contents of the CloudFormation template to validate
@param validation_output_name [string] name of the CloudFormation output containing the SHA256 hash to be validated
'''
template_object = json.loads(cloudformation_template_string)
if 'Outputs' in template_object:
if validation_output_name in template_object['Outputs']:
if 'Value' in template_object['Outputs'][validation_output_name]:
hash_to_validate = template_object['Outputs'][validation_output_name]['Value']
del template_object['Outputs'][validation_output_name]
m = hashlib.sha256()
m.update(EnvironmentBase.__validation_formatter(template_object))
template_hash = m.hexdigest()
print '* hash to validate: ' + hash_to_validate
print '* calculated hash: ' + template_hash
if hash_to_validate == template_hash:
print 'Template is valid'
else:
raise RuntimeError('Template hash is not valid')
else:
print 'Cannot validate this template as it appears it is corrupt. The [' + validation_output_name + '] output does not contain a value property.'
else:
print 'Cannot validate this template as it does not contain the specified output [' + validation_output_name + '] - check to make sure this is the right name and try again.'
else:
print 'This template does not contain a collection of outputs. Please check the input template and try again.'
def create_instance_profile(self,
layer_name,
iam_policies=None):
'''
Helper method creates an IAM Role and Instance Profile for the optoinally specified IAM policies
@param layer_name [string] friendly name for the Role and Instance Profile used for naming and path organization
@param iam_policies [Troposphere.iam.Policy[]] array of IAM Policies to be associated with the Role and Instance Profile created
'''
iam_role_obj = iam.Role(layer_name + 'IAMRole',
AssumeRolePolicyDocument={
'Statement': [{
'Effect': 'Allow',
'Principal': {'Service': ['ec2.amazonaws.com']},
'Action': ['sts:AssumeRole']
}]},
Path=Join('',['/' + self.globals.get('environment_name', 'environmentbase') + '/', layer_name , '/']))
if iam_policies != None:
iam_role_obj.Policies = iam_policies
iam_role = self.template.add_resource(iam_role_obj)
return self.template.add_resource(iam.InstanceProfile(layer_name + 'InstancePolicy',
Path='/' + self.globals.get('environment_name', 'environmentbase') + '/',
Roles=[Ref(iam_role)]))
def add_child_template(self,
name,
template,
s3_bucket=None,
s3_key_prefix=None,
s3_canned_acl=None):
'''
Method adds a child template to this object's template and binds the child template parameters to properties, resources and other stack outputs
@param name [str] name of this template for key naming in s3
@param template [Troposphere.Template] Troposphere Template object to add as a child to this object's template
@param template_args [dict] key-value pair of configuration values for templates to apply to this operation
@param s3_bucket [str] name of the bucket to upload keys to - will default to value in template_args if not present
@param s3_key_prefix [str] s3 key name prefix to prepend to s3 key path - will default to value in template_args if not present
@param s3_canned_acl [str] name of the s3 canned acl to apply to templates uploaded to S3 - will default to value in template_args if not present
'''
key_serial = str(int(time.time()))
if s3_bucket == None:
s3_bucket = self.template_args.get('s3_bucket')
if s3_bucket == None:
raise RuntimeError('Cannot upload template to s3 as a s3 bucket was not specified nor set as a default')
if s3_key_prefix == None:
s3_key_prefix = self.template_args.get('s3_key_name_prefix', '')
if s3_key_prefix == None:
s3_key_name = '/' + name + '.' + key_serial + '.template'
else:
s3_key_name = s3_key_prefix + '/' + name + '.' + key_serial + '.template'
if s3_canned_acl == None:
s3_canned_acl = self.template_args.get('s3_canned_acl', 'private')
if self.template_args.get('mock_upload',False):
stack_url = 'http://www.dualspark.com'
else:
conn = boto.connect_s3()
bucket = conn.get_bucket(s3_bucket)
key = Key(bucket)
key.key = s3_key_name
key.set_contents_from_string(template.to_json())
key.set_acl(s3_canned_acl)
stack_url = key.generate_url(expires_in=0, query_auth=False)
if name not in self.stack_outputs:
self.stack_outputs[name] = []
stack_params = {}
for parameter in template.parameters.keys():
if parameter in self.manual_parameter_bindings:
stack_params[parameter] = self.manual_parameter_bindings[parameter]
elif parameter.startswith('availabilityZone'):
stack_params[parameter] = GetAtt('privateSubnet' + parameter.replace('availabilityZone',''), 'AvailabilityZone')
elif parameter in self.template.parameters.keys():
stack_params[parameter] = Ref(self.template.parameters.get(parameter))
elif parameter in self.template.resources.keys():
stack_params[parameter] = Ref(self.template.resources.get(parameter))
elif parameter in self.stack_outputs:
stack_params[parameter] = GetAtt(self.stack_outputs[parameter], 'Outputs.' + parameter)
else:
stack_params[parameter] = Ref(self.template.add_parameter(template.parameters[parameter]))
stack_name = name + 'Stack'
for output in template.outputs:
if output not in self.stack_outputs:
self.stack_outputs[output] = stack_name
else:
raise RuntimeError('Cannot add child stack with output named ' + output + ' as it was already added by stack named ' + self.stack_outputs[output])
self.template.add_resource(cf.Stack(stack_name,
TemplateURL=stack_url,
Parameters=stack_params,
TimeoutInMinutes=self.template_args.get('timeout_in_minutes', '60')))
if __name__ == '__main__':
import json
with open('config_args.json', 'r') as f:
cmd_args = json.loads(f.read())
test = EnvironmentBase(cmd_args)
print test.to_json()
|
#! /usr/bin/python
from __future__ import print_function
import argparse
import os
import re
import sys
import urllib2
import zipfile
from build import Build
from constants import (
build_url,
datetime_format,
datetime_re,
hosted_build_url_template,
archive_directory,
archived_build_file_template,
archived_build_file_inner_path,
extraction_directory,
extracted_build_file_template,
)
def download_file(url, destination):
print('Downloading %s as %s...' % (url, destination), end='')
sys.stdout.flush()
try:
with open(destination, 'w') as writer:
source = urllib2.urlopen(url)
writer.write(source.read())
except:
os.remove(destination)
raise
print('done')
def extract_file(source, inner_path, destination):
print('Extracting %s from %s as %s...' % (source, inner_path, destination), end='')
with zipfile.ZipFile(source, 'r') as archive:
with open(destination, 'w') as writer:
writer.write(archive.read(inner_path))
print('done')
def ensure_directory(directory):
if not os.path.exists(directory):
os.mkdir(directory)
def ensure_archived_build_file(build):
ensure_directory(archive_directory)
archived_build_file = archived_build_file_template % build.tuple()
if not os.path.exists(archived_build_file):
download_file(hosted_build_url_template % build.commit, archived_build_file)
return archived_build_file
def ensure_build_file(build):
ensure_directory(extraction_directory)
extracted_build_file = extracted_build_file_template % build.tuple()
if not os.path.exists(extracted_build_file):
archived_build_file = ensure_archived_build_file(build)
extract_file(archived_build_file, archived_build_file_inner_path, extracted_build_file)
return extracted_build_file
def get_command_line_args():
parser = argparse.ArgumentParser()
parser.add_argument('--commit', type=str, help='The build commit to download.')
parser.add_argument('--datetime', type=str, help='The datetime to associate with the build.')
args = parser.parse_args()
if not args.commit:
print('Build commit missing.')
parser.print_help()
sys.exit(1)
if not re.match(datetime_re, args.datetime):
print('Invalid datetime.')
parser.print_help()
sys.exit(1)
return args
def main():
args = get_command_line_args()
build_file = ensure_build_file(Build(args.datetime, args.commit))
print('Build file: ', build_file)
if __name__ == '__main__':
main()
Fix print statements
#! /usr/bin/python
from __future__ import print_function
import argparse
import os
import re
import sys
import urllib2
import zipfile
from build import Build
from constants import (
build_url,
datetime_format,
datetime_re,
hosted_build_url_template,
archive_directory,
archived_build_file_template,
archived_build_file_inner_path,
extraction_directory,
extracted_build_file_template,
)
def download_file(url, destination):
print('Downloading\n\t%s\nas\n\t%s\n...' % (url, destination), end='')
sys.stdout.flush()
try:
with open(destination, 'w') as writer:
source = urllib2.urlopen(url)
writer.write(source.read())
except:
os.remove(destination)
raise
print('done')
def extract_file(source, inner_path, destination):
print('Extracting\n\t%s\nfrom\n\t%s\nas\n\t%s\n...' % (inner_path, source, destination), end='')
with zipfile.ZipFile(source, 'r') as archive:
with open(destination, 'w') as writer:
writer.write(archive.read(inner_path))
print('done')
def ensure_directory(directory):
if not os.path.exists(directory):
os.mkdir(directory)
def ensure_archived_build_file(build):
ensure_directory(archive_directory)
archived_build_file = archived_build_file_template % build.tuple()
if not os.path.exists(archived_build_file):
download_file(hosted_build_url_template % build.commit, archived_build_file)
return archived_build_file
def ensure_build_file(build):
ensure_directory(extraction_directory)
extracted_build_file = extracted_build_file_template % build.tuple()
if not os.path.exists(extracted_build_file):
archived_build_file = ensure_archived_build_file(build)
extract_file(archived_build_file, archived_build_file_inner_path, extracted_build_file)
return extracted_build_file
def get_command_line_args():
parser = argparse.ArgumentParser()
parser.add_argument('--commit', type=str, help='The build commit to download.')
parser.add_argument('--datetime', type=str, help='The datetime to associate with the build.')
args = parser.parse_args()
if not args.commit:
print('Build commit missing.')
parser.print_help()
sys.exit(1)
if not re.match(datetime_re, args.datetime):
print('Invalid datetime.')
parser.print_help()
sys.exit(1)
return args
def main():
args = get_command_line_args()
build_file = ensure_build_file(Build(args.datetime, args.commit))
print('Build file: ', build_file)
if __name__ == '__main__':
main()
|
# vim:ts=4:sts=4:sw=4:expandtab
# used in code generated by generate_attribute_group()
from types import NoneType
from satori.core.export import ExportClass, ExportModel, ExportMethod
from satori.core.export import BadAttributeType, Attribute, AnonymousAttribute, AttributeGroupField, DefaultAttributeGroupField
from satori.core.export import PCDeny, PCPermit, PCArg, PCArgField, PCGlobal, PCAnd, PCOr, PCEach, PCEachKey, PCEachValue, PCTokenUser, PCRawBlob
from satori.core.export import PCTokenIsUser, PCTokenIsMachine
from satori.core.export import token_container
from satori.core.export import Struct, DefineException, TypedList, TypedMap
from satori.core.export import DjangoId, DjangoStruct, DjangoIdList, DjangoStructList
from satori.core.export import CannotDeleteObject
def _load_models():
import _ast
import ast
import os
import re
import sys
from satori.core.export._topsort import topsort, CycleError
modules = []
entities_dir = os.path.join(os.path.split(__file__)[0], 'entities')
for filename in os.listdir(entities_dir):
if re.match(r'^[a-zA-Z][_a-zA-Z]*\.py$', filename):
filename = os.path.join(entities_dir, filename)
with open(filename, 'r') as f:
moduleast = ast.parse(f.read(), filename)
modulecode = compile(moduleast, filename, 'exec')
provides = set()
uses = set()
for node in moduleast.body:
if isinstance(node, _ast.Assign):
provides.update([x.id for x in node.targets])
if isinstance(node, _ast.ClassDef):
provides.add(node.name)
if isinstance(node, _ast.FunctionDef):
provides.add(node.name)
if isinstance(node, _ast.ImportFrom):
if node.module == 'satori.core.models':
uses.update([x.name for x in node.names])
modules.append((modulecode, uses, provides))
pairs = []
providers = {}
for (code, uses, provides) in modules:
for provide in provides:
if provide in providers:
print 'Two modules provide {0}'.format(provide)
raise ImportError('Two modules provide {0}'.format(provide))
providers[provide] = code
for (code, uses, provides) in modules:
for use in uses:
if not use in providers:
print 'No module provides {0}'.format(use)
raise ImportError('No module provides {0}'.format(use))
pairs.append((providers[use], code))
# so isolated nodes are in the result
pairs.append((None, code))
try:
codes = topsort(pairs)[1:]
except CycleError:
print 'There is a cycle in module dependencies'
raise ImportError('There is a cycle in module dependencies')
module = sys.modules[__name__]
for code in codes:
exec code in module.__dict__
import satori.core.export
import satori.core.export.oa
import satori.core.export.pc
import satori.core.export.type_helpers
import satori.core.export.types_django
import satori.core.export.token
satori.core.export.init()
satori.core.export.oa.init()
satori.core.export.pc.init()
satori.core.export.type_helpers.init()
satori.core.export.types_django.init()
satori.core.export.token.init()
_load_models()
Added InvalidArgument exception. Added *Struct to satori.core.models (2/2).
# vim:ts=4:sts=4:sw=4:expandtab
# used in code generated by generate_attribute_group()
from types import NoneType
from satori.core.export import ExportClass, ExportModel, ExportMethod
from satori.core.export import BadAttributeType, Attribute, AnonymousAttribute, AttributeGroupField, DefaultAttributeGroupField
from satori.core.export import PCDeny, PCPermit, PCArg, PCArgField, PCGlobal, PCAnd, PCOr, PCEach, PCEachKey, PCEachValue, PCTokenUser, PCRawBlob
from satori.core.export import PCTokenIsUser, PCTokenIsMachine
from satori.core.export import token_container
from satori.core.export import Struct, DefineException, TypedList, TypedMap
from satori.core.export import DjangoId, DjangoStruct, DjangoIdList, DjangoStructList
from satori.core.export import CannotDeleteObject, InvalidArgument
def _load_models():
import _ast
import ast
import os
import re
import sys
from satori.core.export._topsort import topsort, CycleError
modules = []
entities_dir = os.path.join(os.path.split(__file__)[0], 'entities')
for filename in os.listdir(entities_dir):
if re.match(r'^[a-zA-Z][_a-zA-Z]*\.py$', filename):
filename = os.path.join(entities_dir, filename)
with open(filename, 'r') as f:
moduleast = ast.parse(f.read(), filename)
modulecode = compile(moduleast, filename, 'exec')
provides = set()
uses = set()
for node in moduleast.body:
if isinstance(node, _ast.Assign):
provides.update([x.id for x in node.targets])
if isinstance(node, _ast.ClassDef):
provides.add(node.name)
if isinstance(node, _ast.FunctionDef):
provides.add(node.name)
if isinstance(node, _ast.ImportFrom):
if node.module == 'satori.core.models':
uses.update([x.name for x in node.names])
modules.append((modulecode, uses, provides))
pairs = []
providers = {}
for (code, uses, provides) in modules:
for provide in provides:
if provide in providers:
print 'Two modules provide {0}'.format(provide)
raise ImportError('Two modules provide {0}'.format(provide))
providers[provide] = code
for (code, uses, provides) in modules:
for use in uses:
if not use in providers:
print 'No module provides {0}'.format(use)
raise ImportError('No module provides {0}'.format(use))
pairs.append((providers[use], code))
# so isolated nodes are in the result
pairs.append((None, code))
try:
codes = topsort(pairs)[1:]
except CycleError:
print 'There is a cycle in module dependencies'
raise ImportError('There is a cycle in module dependencies')
module = sys.modules[__name__]
for code in codes:
exec code in module.__dict__
import satori.core.export
import satori.core.export.oa
import satori.core.export.pc
import satori.core.export.type_helpers
import satori.core.export.types_django
import satori.core.export.token
satori.core.export.init()
satori.core.export.oa.init()
satori.core.export.pc.init()
satori.core.export.type_helpers.init()
satori.core.export.types_django.init()
satori.core.export.token.init()
glob = globals()
for ars_struct in satori.core.export.types_django.ars_django_structure.values():
glob[ars_struct.name] = ars_struct.get_class()
_load_models()
|
import pytest
from seaworthy.checks import docker_client
from seaworthy.definitions import (
ContainerDefinition, NetworkDefinition, VolumeDefinition)
from seaworthy.helpers import fetch_image
from seaworthy.pytest import dockertest
IMG = 'nginx:alpine'
@dockertest()
class PytestFixtureMixin:
def make_definition(self, name):
raise NotImplementedError() # pragma: no cover
def test_setup_teardown(self, request, docker_helper):
"""
The ``pytest_fixture()`` method should return a fixture. That fixture
should yield the definition with its resource created and when yielded
again, the resource should be removed.
"""
fixture = self.make_definition('test').pytest_fixture('test')
fixture_gen = fixture(request, docker_helper)
definition = next(fixture_gen)
# Resrouce has been created
assert definition.created
# Test things are torn down
with pytest.raises(StopIteration):
next(fixture_gen)
# Resource has been removed
assert not definition.created
class TestContainerDefinition(PytestFixtureMixin):
@classmethod
def setUpClass(cls):
with docker_client() as client:
fetch_image(client, IMG)
def make_definition(self, name):
return ContainerDefinition(name, IMG)
def test_clean_fixtures(self, request, docker_helper):
"""
The fixture returned by the ``pytest_clean_fixture()`` method should
yield a started container, and afterwards stop and remove the
container.
"""
raw_fixture, fixture = ContainerDefinition(
name='test', image=IMG).pytest_clean_fixtures('test')
fixture_gen = raw_fixture(request, docker_helper)
# TODO: Assert on cleaning fixture
container = next(fixture_gen)
assert isinstance(container, ContainerDefinition)
assert container.inner().status == 'running'
# Test things are torn down
with pytest.raises(StopIteration):
next(fixture_gen)
# Container has been stopped and removed
assert not container.created
class TestNetworkDefinition(PytestFixtureMixin):
def make_definition(self, name):
return NetworkDefinition(name)
class TestVolumeDefinition(PytestFixtureMixin):
def make_definition(self, name):
return VolumeDefinition(name)
Use the right method name for pytest
import pytest
from seaworthy.checks import docker_client
from seaworthy.definitions import (
ContainerDefinition, NetworkDefinition, VolumeDefinition)
from seaworthy.helpers import fetch_image
from seaworthy.pytest import dockertest
IMG = 'nginx:alpine'
@dockertest()
class PytestFixtureMixin:
def make_definition(self, name):
raise NotImplementedError() # pragma: no cover
def test_setup_teardown(self, request, docker_helper):
"""
The ``pytest_fixture()`` method should return a fixture. That fixture
should yield the definition with its resource created and when yielded
again, the resource should be removed.
"""
fixture = self.make_definition('test').pytest_fixture('test')
fixture_gen = fixture(request, docker_helper)
definition = next(fixture_gen)
# Resrouce has been created
assert definition.created
# Test things are torn down
with pytest.raises(StopIteration):
next(fixture_gen)
# Resource has been removed
assert not definition.created
class TestContainerDefinition(PytestFixtureMixin):
@classmethod
def setup_class(cls):
with docker_client() as client:
fetch_image(client, IMG)
def make_definition(self, name):
return ContainerDefinition(name, IMG)
def test_clean_fixtures(self, request, docker_helper):
"""
The fixture returned by the ``pytest_clean_fixture()`` method should
yield a started container, and afterwards stop and remove the
container.
"""
raw_fixture, fixture = ContainerDefinition(
name='test', image=IMG).pytest_clean_fixtures('test')
fixture_gen = raw_fixture(request, docker_helper)
# TODO: Assert on cleaning fixture
container = next(fixture_gen)
assert isinstance(container, ContainerDefinition)
assert container.inner().status == 'running'
# Test things are torn down
with pytest.raises(StopIteration):
next(fixture_gen)
# Container has been stopped and removed
assert not container.created
class TestNetworkDefinition(PytestFixtureMixin):
def make_definition(self, name):
return NetworkDefinition(name)
class TestVolumeDefinition(PytestFixtureMixin):
def make_definition(self, name):
return VolumeDefinition(name)
|
# -*- coding: utf-8 -*-
from functools import wraps
from datetime import datetime
import os
import re
from mailpy.view import MailView
from mailpy.response import TextMailResponse
from mailpy.exceptions import MailViewError
from mailpy.contrib.filelock import FileLock, FileLockTimeout
from mailpy.contrib.pelican.api import PelicanAPI
from mailpy.contrib.pelican.utils import stringify, slugify
from mailpy.contrib.pelican.content import RstArticle
from mailpy.contrib.pelican.exceptions import FileNotFound, FileAlreadyExists, UnknownFileFormat
__all__ = ('PelicanMailView',)
def lock(fun):
"""Lock decorator"""
@wraps(fun)
def wrap(obj, request, *args, **kwargs):
flock = FileLock(obj.lock_file)
try:
flock.acquire()
except FileLockTimeout as exc:
raise MailViewError(request, 'Locked: %s' % exc, status_code=423)
try:
return fun(obj, request, *args, **kwargs)
finally:
flock.release()
return wrap
class PelicanMailView(MailView):
"""
Pelican mail view.
"""
settings_file = NotImplemented
article_class = RstArticle # Used only for new articles
article_file_name = '%Y-%m-%d-{slug}' # Without extension; valid placeholders: {slug} and strftime() directives
papi_class = PelicanAPI
papi_settings = ()
site_url = None
authors = ()
lock_file = None
_valid_content_maintypes = frozenset(('text', 'image', 'audio', 'video', 'application'))
_valid_text_content_type = frozenset(('text/plain',))
_ignored_file_content_types = frozenset(('text/x-vcard', 'text/vcard',
'application/x-pkcs12',
'application/x-pkcs7-signature',
'application/x-pkcs7-mime',
'application/pkcs12',
'application/pkcs7-signature',
'application/pkcs7-mime'))
def __init__(self):
super(PelicanMailView, self).__init__()
# Initialize the Pelican API
self.papi = self.papi_class(self.settings_file, **dict(self.papi_settings))
self.lock_file = self.lock_file or self.settings_file + '.lock'
@property
def _site_url(self):
"""Return site URL"""
return self.site_url or self.papi.site_url
def _get_author_from_email(self, email, default=None):
"""Helper for mail views"""
for author, emails in self.authors:
if email in emails:
return author
return default
def _create_article_slug(self, title, articles):
"""Create unique article slug from title"""
slug = orig_slug = slugify(title)
slugs = self.papi.get_article_slugs(articles=articles) # dict {slug: article}
i = 1
while slug in slugs:
i += 1
slug = '%s-%d' % (orig_slug, i)
return slug
def __create_article_filename(self, slug, addon=''):
"""Generate new article filename"""
name = datetime.now().strftime(self.article_file_name)
filename = name.format(slug=slug)
return '%s%s%s' % (filename, addon, self.article_class.extension)
def _create_article_filename(self, slug, articles):
"""Create new unique filename from title"""
filename = self.__create_article_filename(slug)
filenames = set(a.filename for a in articles)
i = 1
while filename in filenames:
i += 1
filename = self.__create_article_filename(slug, addon='-%d' % i)
return filename
def _create_article(self, title):
"""Create new PelicanArticle object"""
articles = self.papi.articles
slug = self._create_article_slug(title, articles)
filename = self._create_article_filename(slug, articles)
return self.article_class(self.papi.content_path, filename)
def _create_static_filename(self, maintype, orig_filename):
"""Create proper filename for static file according to original filename"""
if maintype == 'image':
directory = self.papi.images_dir
else:
directory = self.papi.files_dir
orig_filename = stringify(orig_filename).strip()
if not orig_filename:
orig_filename = 'noname'
filename = os.path.join(directory, orig_filename)
static_files = set(self.papi.get_static_files(directory))
name, ext = os.path.splitext(filename)
i = 1
while filename in static_files:
i += 1
filename = '%s-%d%s' % (name, i, ext)
return filename
@staticmethod
def _get_msg_text(msg_part, fallback_charset=None):
"""Return text in mime message converted into unicode str"""
content = msg_part.get_payload(decode=True)
charset = msg_part.get_content_charset() or fallback_charset or 'ascii'
return content.decode(charset, 'replace')
@staticmethod
def _edit_msg_text(text):
"""Process text extracted from mail message and return text suitable for article content"""
# Fix automatic links injected by mail clients, e.g. "<www.google.com> <http://www.google.com>"
return re.sub(r'([^\s]+) <([\w\+]+:/*)?\1/?>(?!`_)', r'\1', text)
def _get_msg_content(self, msg, article):
"""Parse message and retrieve text content and additional file attachments"""
text = []
files = []
for part in msg.walk():
content_type = part.get_content_type()
maintype = part.get_content_maintype()
if maintype in self._valid_content_maintypes:
orig_filename = part.get_filename()
if orig_filename: # Attached file
if content_type in self._ignored_file_content_types:
continue # Ignore vcard, digital signatures and stuff like this
filename = self._create_static_filename(maintype, orig_filename)
if maintype == 'image':
text_data = article.image(orig_filename, '{filename}/%s' % filename)
else:
text_data = article.internal_link(orig_filename, '{filename}/%s' % filename)
text.append(text_data)
files.append(self.papi.get_static_file(filename, content=part.get_payload(decode=True),
encoding=part.get_content_charset())) # Store raw
elif content_type in self._valid_text_content_type: # Article text
msg_text = self._get_msg_text(part, msg.get_charset()) # Decode using content charset
text.append(self._edit_msg_text(msg_text))
return '\n\n'.join(text), files
def _get_article_metadata(self, request, article, text):
"""Create article metadata"""
metadata = {
'date': datetime.now().strftime('%Y-%m-%d %H:%M'),
'authors': self._get_author_from_email(request.sender, request.sender),
}
new_text, parsed_metadata = article.get_text_metadata(text)
metadata.update(parsed_metadata)
return text, metadata
@staticmethod
def _save_article(request, article, static_files):
"""Save article file and all static files; Delete already created files in case of an error"""
created = []
try:
for static_file in static_files:
static_file.save()
created.append(static_file.full_path)
try:
article.save()
except FileAlreadyExists:
raise MailViewError(request, 'Article "%s" already exists' % article, status_code=406)
else:
created.append(article.full_path)
except Exception as exc:
for f in created:
# noinspection PyBroadException
try:
os.remove(f)
except:
pass
raise exc # Re-raise original exception
return created
# noinspection PyUnusedLocal
@staticmethod
def _delete_article(request, article):
"""Delete article"""
# TODO: delete related static files
article.delete()
return [article.full_path]
def _get_article(self, request, title_or_filename):
"""Fetch existing article according to title or filename"""
try:
try:
return self.papi.get_article(title_or_filename)
except UnknownFileFormat:
return self.papi.get_article_by_slug(slugify(title_or_filename.lstrip('Re: ')))
except FileNotFound:
raise MailViewError(request, 'Article "%s" was not found' % title_or_filename, status_code=404)
def _commit_and_publish(self, commit_msg, **commit_kwargs):
"""Commit to git if repo_path is set and update html files"""
if commit_msg and self.papi.repo_path:
self.papi.commit(commit_msg, **commit_kwargs)
self.papi.publish()
def _response(self, request, msg, **kwargs):
"""Create nice mail response"""
site_url = self.site_url
if site_url and site_url.startswith('http'):
msg += '\n\n--\n%s\n' % site_url
return TextMailResponse(request, msg, **kwargs)
def get(self, request):
"""Return list of blog posts or content of one blog post depending on the subject"""
filename = request.subject.strip()
if filename:
article = self._get_article(request, filename)
res = article.load()
else:
res = '\n'.join(a.filename for a in self.papi.articles)
return self._response(request, res)
@lock
def post(self, request):
"""Create new blog post, commit and rebuild the html output"""
title = request.subject.strip()
if not title:
raise MailViewError(request, 'Subject (title) is required')
article = self._create_article(title)
text, static_files = self._get_msg_content(request, article)
text, metadata = self._get_article_metadata(request, article, text)
article.compose(title, text, metadata)
created = self._save_article(request, article, static_files)
commit_msg = 'Added article %s' % article.filename
if static_files:
commit_msg += ' + static files:\n\t+ %s' % '\n\t+ '.join(i.filename for i in static_files)
self._commit_and_publish(commit_msg, add=created)
sep = '*' * 40
out = 'Article "%s" was successfully created\n\n%s\n%s\n%s' % (article.filename, sep, article.content, sep)
return self._response(request, out)
@lock
def delete(self, request):
"""Delete one blog post, commit and rebuild the html output"""
filename = request.subject.strip()
if not filename:
raise MailViewError(request, 'Subject (filename) is required')
article = self._get_article(request, filename)
deleted = self._delete_article(request, article)
self._commit_and_publish('Deleted article %s' % article, remove=deleted)
return self._response(request, 'Article "%s" was successfully deleted' % article.filename)
Fixed custom metadata parsing - Issue #2
# -*- coding: utf-8 -*-
from functools import wraps
from datetime import datetime
import os
import re
from mailpy.view import MailView
from mailpy.response import TextMailResponse
from mailpy.exceptions import MailViewError
from mailpy.contrib.filelock import FileLock, FileLockTimeout
from mailpy.contrib.pelican.api import PelicanAPI
from mailpy.contrib.pelican.utils import stringify, slugify
from mailpy.contrib.pelican.content import RstArticle
from mailpy.contrib.pelican.exceptions import FileNotFound, FileAlreadyExists, UnknownFileFormat
__all__ = ('PelicanMailView',)
def lock(fun):
"""Lock decorator"""
@wraps(fun)
def wrap(obj, request, *args, **kwargs):
flock = FileLock(obj.lock_file)
try:
flock.acquire()
except FileLockTimeout as exc:
raise MailViewError(request, 'Locked: %s' % exc, status_code=423)
try:
return fun(obj, request, *args, **kwargs)
finally:
flock.release()
return wrap
class PelicanMailView(MailView):
"""
Pelican mail view.
"""
settings_file = NotImplemented
article_class = RstArticle # Used only for new articles
article_file_name = '%Y-%m-%d-{slug}' # Without extension; valid placeholders: {slug} and strftime() directives
papi_class = PelicanAPI
papi_settings = ()
site_url = None
authors = ()
lock_file = None
_valid_content_maintypes = frozenset(('text', 'image', 'audio', 'video', 'application'))
_valid_text_content_type = frozenset(('text/plain',))
_ignored_file_content_types = frozenset(('text/x-vcard', 'text/vcard',
'application/x-pkcs12',
'application/x-pkcs7-signature',
'application/x-pkcs7-mime',
'application/pkcs12',
'application/pkcs7-signature',
'application/pkcs7-mime'))
def __init__(self):
super(PelicanMailView, self).__init__()
# Initialize the Pelican API
self.papi = self.papi_class(self.settings_file, **dict(self.papi_settings))
self.lock_file = self.lock_file or self.settings_file + '.lock'
@property
def _site_url(self):
"""Return site URL"""
return self.site_url or self.papi.site_url
def _get_author_from_email(self, email, default=None):
"""Helper for mail views"""
for author, emails in self.authors:
if email in emails:
return author
return default
def _create_article_slug(self, title, articles):
"""Create unique article slug from title"""
slug = orig_slug = slugify(title)
slugs = self.papi.get_article_slugs(articles=articles) # dict {slug: article}
i = 1
while slug in slugs:
i += 1
slug = '%s-%d' % (orig_slug, i)
return slug
def __create_article_filename(self, slug, addon=''):
"""Generate new article filename"""
name = datetime.now().strftime(self.article_file_name)
filename = name.format(slug=slug)
return '%s%s%s' % (filename, addon, self.article_class.extension)
def _create_article_filename(self, slug, articles):
"""Create new unique filename from title"""
filename = self.__create_article_filename(slug)
filenames = set(a.filename for a in articles)
i = 1
while filename in filenames:
i += 1
filename = self.__create_article_filename(slug, addon='-%d' % i)
return filename
def _create_article(self, title):
"""Create new PelicanArticle object"""
articles = self.papi.articles
slug = self._create_article_slug(title, articles)
filename = self._create_article_filename(slug, articles)
return self.article_class(self.papi.content_path, filename)
def _create_static_filename(self, maintype, orig_filename):
"""Create proper filename for static file according to original filename"""
if maintype == 'image':
directory = self.papi.images_dir
else:
directory = self.papi.files_dir
orig_filename = stringify(orig_filename).strip()
if not orig_filename:
orig_filename = 'noname'
filename = os.path.join(directory, orig_filename)
static_files = set(self.papi.get_static_files(directory))
name, ext = os.path.splitext(filename)
i = 1
while filename in static_files:
i += 1
filename = '%s-%d%s' % (name, i, ext)
return filename
@staticmethod
def _get_msg_text(msg_part, fallback_charset=None):
"""Return text in mime message converted into unicode str"""
content = msg_part.get_payload(decode=True)
charset = msg_part.get_content_charset() or fallback_charset or 'ascii'
return content.decode(charset, 'replace')
@staticmethod
def _edit_msg_text(text):
"""Process text extracted from mail message and return text suitable for article content"""
# Fix automatic links injected by mail clients, e.g. "<www.google.com> <http://www.google.com>"
return re.sub(r'([^\s]+) <([\w\+]+:/*)?\1/?>(?!`_)', r'\1', text)
def _get_msg_content(self, msg, article):
"""Parse message and retrieve text content and additional file attachments"""
text = []
files = []
for part in msg.walk():
content_type = part.get_content_type()
maintype = part.get_content_maintype()
if maintype in self._valid_content_maintypes:
orig_filename = part.get_filename()
if orig_filename: # Attached file
if content_type in self._ignored_file_content_types:
continue # Ignore vcard, digital signatures and stuff like this
filename = self._create_static_filename(maintype, orig_filename)
if maintype == 'image':
text_data = article.image(orig_filename, '{filename}/%s' % filename)
else:
text_data = article.internal_link(orig_filename, '{filename}/%s' % filename)
text.append(text_data)
files.append(self.papi.get_static_file(filename, content=part.get_payload(decode=True),
encoding=part.get_content_charset())) # Store raw
elif content_type in self._valid_text_content_type: # Article text
msg_text = self._get_msg_text(part, msg.get_charset()) # Decode using content charset
text.append(self._edit_msg_text(msg_text))
return '\n\n'.join(text), files
def _get_article_metadata(self, request, article, text):
"""Create article metadata"""
metadata = {
'date': datetime.now().strftime('%Y-%m-%d %H:%M'),
'authors': self._get_author_from_email(request.sender, request.sender),
}
new_text, parsed_metadata = article.get_text_metadata(text)
metadata.update(parsed_metadata)
return new_text, metadata
@staticmethod
def _save_article(request, article, static_files):
"""Save article file and all static files; Delete already created files in case of an error"""
created = []
try:
for static_file in static_files:
static_file.save()
created.append(static_file.full_path)
try:
article.save()
except FileAlreadyExists:
raise MailViewError(request, 'Article "%s" already exists' % article, status_code=406)
else:
created.append(article.full_path)
except Exception as exc:
for f in created:
# noinspection PyBroadException
try:
os.remove(f)
except:
pass
raise exc # Re-raise original exception
return created
# noinspection PyUnusedLocal
@staticmethod
def _delete_article(request, article):
"""Delete article"""
# TODO: delete related static files
article.delete()
return [article.full_path]
def _get_article(self, request, title_or_filename):
"""Fetch existing article according to title or filename"""
try:
try:
return self.papi.get_article(title_or_filename)
except UnknownFileFormat:
return self.papi.get_article_by_slug(slugify(title_or_filename.lstrip('Re: ')))
except FileNotFound:
raise MailViewError(request, 'Article "%s" was not found' % title_or_filename, status_code=404)
def _commit_and_publish(self, commit_msg, **commit_kwargs):
"""Commit to git if repo_path is set and update html files"""
if commit_msg and self.papi.repo_path:
self.papi.commit(commit_msg, **commit_kwargs)
self.papi.publish()
def _response(self, request, msg, **kwargs):
"""Create nice mail response"""
site_url = self.site_url
if site_url and site_url.startswith('http'):
msg += '\n\n--\n%s\n' % site_url
return TextMailResponse(request, msg, **kwargs)
def get(self, request):
"""Return list of blog posts or content of one blog post depending on the subject"""
filename = request.subject.strip()
if filename:
article = self._get_article(request, filename)
res = article.load()
else:
res = '\n'.join(a.filename for a in self.papi.articles)
return self._response(request, res)
@lock
def post(self, request):
"""Create new blog post, commit and rebuild the html output"""
title = request.subject.strip()
if not title:
raise MailViewError(request, 'Subject (title) is required')
article = self._create_article(title)
text, static_files = self._get_msg_content(request, article)
text, metadata = self._get_article_metadata(request, article, text)
article.compose(title, text, metadata)
created = self._save_article(request, article, static_files)
commit_msg = 'Added article %s' % article.filename
if static_files:
commit_msg += ' + static files:\n\t+ %s' % '\n\t+ '.join(i.filename for i in static_files)
self._commit_and_publish(commit_msg, add=created)
sep = '*' * 40
out = 'Article "%s" was successfully created\n\n%s\n%s\n%s' % (article.filename, sep, article.content, sep)
return self._response(request, out)
@lock
def delete(self, request):
"""Delete one blog post, commit and rebuild the html output"""
filename = request.subject.strip()
if not filename:
raise MailViewError(request, 'Subject (filename) is required')
article = self._get_article(request, filename)
deleted = self._delete_article(request, article)
self._commit_and_publish('Deleted article %s' % article, remove=deleted)
return self._response(request, 'Article "%s" was successfully deleted' % article.filename)
|
import math, pickle, warnings, time, json, copy
import sympy as sy
from solution import Solution
_error_msg = {
0: ("System expects as maximum as 2 parameters which represent lists "
"of objects and constraints"),
1: "Equation %d cannot be solved against variable %s.",
2: "All objects must have 'equations' attribute.",
3: "At least one equation is required to construct system of equations.",
4: "Arguments must be of type list or tuple."
}
_warn_msg = {
0: ("Variable '%s' has been chosen as a redundant in equation #%d.\n"
"Call edit_sparse() method to replace it with another free variable "
"in the equation."),
1: "More then 1 solution found while replacing redundant variables.",
2: "Keyword argument '%s' is not a valid argument and will be ignored."
}
_info_msg = {
'head': ("=======================================\n"
"total number of variables: %d\n"
"total number of equations: %d\n"
"degrees of freedom: %d\n"
"=======================================\n"),
'independent': "%d independent variables found: ",
'redundant': "%d redundant variables found: ",
'constants': "%d variables marked as constants: ",
'mult': "Redundant variable '%s' has multiple solutions:\n",
'mult_': ("\nBy default last solution has been chosen. "
"Call replace_solution() method to specify other solution.\n"),
'shrinked': "System has been shrinked by %d equations.\n",
'shrinking': "\nShrinking system of equations...\n",
'try_shrink': 'Try shrinking the system by calling shrink() method.',
}
class System(object):
"""
Represents a system of equations.
System of equations can be constructed from a list of equations, list of
objects that have attribute 'equations' (which is a list of equations) or both.
Equations must be a SymPy objects of type Add, Mul or Pow.
Parameters
----------
objects: list of objects that have attribute 'equations'.
constraints: list of equations.
subs: dictionary of variable substitutions.
constants: list of constants in equations.
verbose: controls the output - 0: silent mode; 1 or True: basic output,
2: extended output.
Examples
--------
TODO
"""
# def __init__(self, objects=[], constraints=[], subs={}, constants=[], verbose=True):
def __init__(self, *args, **kwargs):
valid_kwargs = ['subs', 'constants', 'verbose']
# check_kwargs = [kwarg in valid_kwargs for kwarg in kwargs]
for kwarg in kwargs:
if kwarg not in valid_kwargs:
warnings.warn(_warn_msg[2] % kwarg)
subs = kwargs.get(valid_kwargs[0], {})
self.constants = kwargs.get(valid_kwargs[1], [])
self.verbose = kwargs.get(valid_kwargs[2], True)
if len(args) > 2:
raise ValueError(_error_msg[0])
objects = []
constraints = []
oind = None
cind = None
constr_types = [sy.Add, sy.Mul, sy.Pow]
for i, arg in enumerate(args):
if type(arg) not in [list, tuple]:
raise TypeError(_error_msg[4])
if len(arg) > 0:
check = [type(unit) in constr_types for unit in arg]
if not check.count(False):
cind = i
else:
try:
check_obj = [obj.equations for obj in objects]
oind = i
except:
raise ValueError(_error_msg[2])
if oind is not None: objects = list(args[oind])
if cind is not None: constraints = list(args[cind])
# print "objects:", objects
# print "constraints:", constraints
if len(objects + constraints) == 0:
raise ValueError(_error_msg[3])
init_dictattr = ['redundant', 'sparse', 'multisols', 'sparse_edit']
self.__dict__.update((name, {}) for name in init_dictattr)
self.shrink_conv = 0
self.long_varname = 0
try:
eq_list = sum([obj.equations.values() for obj in objects], constraints)
except:
eq_list = sum([obj.equations for obj in objects], constraints)
self.equations = {i: eq.subs(subs) for i, eq in enumerate(eq_list)}
self._copy_equations()
# for i, eq in enumerate(eq_list):
# self.equations[i] = eq.subs(self.subs)
# varlist = list(eq.atoms(sy.Symbol))
# self.variables += varlist
self.inspect()
def _copy_equations(self):
self.equations_tmp = copy.deepcopy(self.equations)
self.tempmap = {i: i for i in self.equations}
def _register_variables(self, add_varlist=[]):
self.__dict__.update((str(v), v) for v in self.variables)
# self.__dict__.update((str(v), v) for v, s in self.sparse.values())
self.__dict__.update((str(v), v) for v in self.redundant)
self.__dict__.update((str(v), v) for v in self.constants)
self.__dict__.update((str(v), v) for v in add_varlist)
# print "self.__dict__:"
# for v in self.__dict__:
# print v
# print
def _cleanup_constants(self):
const_set = set(self.constants)
muddy_constants = const_set - self.variables
self.constants = list(const_set - muddy_constants)
def _sort_equations(self):
# self.equations = {i: eq for i, eq in enumerate(self.equations.values())}
# self.equations = {}
equations = {}
for j, (i, eq) in enumerate(self.equations.items()):
equations[j] = eq
self.tempmap[j] = i
self.equations = equations
# self.tempmap
def _add_sparse(self, i, var, free_list):
eq = self.equations[i]
self.sparse[i] = (var, eq)
free_list.append(var)
def _delve_nonsparse(self, nonsparse_map, free_list):
equations_ = copy.deepcopy(self.equations)
for i, free_vars in nonsparse_map.items():
redundant = None
eq_temp = equations_[i]
# retrieve set of variables in all equations except the current one
del equations_[i]
variables_set = set(sum((list(eq.free_symbols) for eq in equations_.values()), []))
equations_[i] = eq_temp
# select variables that are not present in free_list
del nonsparse_map[i]
vars_selected = []
free_set = set(free_list)
for var in free_vars:
if var not in free_set:
vars_selected.append(var)
if len(vars_selected) == 1:
redundant = vars_selected[0]
elif len(vars_selected) > 1:
# select unique variables that are not present in any other equation
var_stack = set(sum(nonsparse_map.values(), list(variables_set)))
vars_selected_ = []
for var in free_vars:
if var not in var_stack:
vars_selected_.append(var)
if len(vars_selected_) == 0:
redundant = vars_selected[0]
# warnmsg = _warn_msg[0] % (str(var), i)
warnmsg = _warn_msg[0] % (str(redundant), i)
warnings.warn(warnmsg)
elif len(vars_selected_) == 1:
redundant = vars_selected_[0]
if redundant is not None:
self._add_sparse(i, redundant, free_list)
nonsparse_map[i] = free_vars
def _extract_sparse(self, deep=False):
free_list = []
nonsparse_map = {}
for i, eq in self.equations.items():
free_vars = self._find_free(eq)
mapped = [ v[0] for v in self.sparse.values() ]
if len(free_vars) == 1 and free_vars[0] not in mapped:
self._add_sparse(i, free_vars[0], free_list)
elif len(free_vars) > 1:
nonsparse_map[i] = free_vars
if deep:
self._delve_nonsparse(nonsparse_map, free_list)
return list(set(free_list))
def _make_summary(self, verbose):
self.dof = len(self.variables) - len(self.equations) - len(self.constants)
self.independent = self.variables - set(self.free_list + self.constants)
redundnum = len(self.free_list)
if verbose:
varnum = len(self.variables) - len(self.constants)
info_head = _info_msg['head'] % (varnum, len(self.equations), self.dof)
info_independent = _info_msg['independent'] % len(self.independent)
info_redundant = _info_msg['redundant'] % redundnum
info_constants = _info_msg['constants'] % len(self.constants)
summary = (info_head + info_independent + str(list(self.independent)) +
'\n' + info_redundant + str(self.free_list) + '\n' +
info_constants + str(list(self.constants)))
print summary
self.print_equations()
if verbose == 2 and len(self.sparse) > 0:
self.print_sparse()
if (redundnum > 0) and (len(self.equations) > 1):
print _info_msg['try_shrink']
def _solve(self, eq, var):
c = eq.coeff(var)
extracted = eq - c*var
if var not in list(extracted.free_symbols):
return extracted/abs(c)
else:
warnings.filterwarnings("always")
sol = sy.solve(eq, var)
if len(sol) > 1:
self.multisols[var] = sol
warnings.warn(_warn_msg[1])
print _info_msg['mult'] % str(var), sol, _info_msg['mult_']
return sol[-1]
# return sy.solve(eq, var)
def _compress(self):
if self.verbose:
print "Compressing..."
# find equations with one free dependent variable
replaced = []
min_varset_len = 0
varsets = {}
j = 0
for i, (var, eq) in self.sparse.items():
varset = eq.free_symbols - set(self.constants + [var]) - self.independent
varset_len = len(varset)
min_varset_len = varset_len if j == 0 else min(varset_len, min_varset_len)
varsets[i] = varset
j += 1
for i, (var, eq) in self.sparse.items():
varset = varsets[i]
if (len(varset) <= min_varset_len) and (len(self.equations) > 1):
# if len(varset) == 0:
# if (len(varset) <= min_varset_len):
self.redundant[var] = self._solve(eq, var)
replaced.append(var)
del self.equations[i]
del self.sparse[i]
self._update()
if self.verbose:
print "Replaced variables:", replaced, "\n"
# print
if self.verbose == 2:
self.print_sparse()
self.print_replacementset()
def _update(self):
# self.equations = self.equations_tmp
# update system of equations
self.equations = {i: eq.subs(self.redundant)
for i, eq in self.equations.items()}
# update sparse equations
self.sparse = {i: (var, eq.subs(self.redundant))
for i, (var, eq) in self.sparse.items()}
# find new sparse equations
self._extract_sparse()
def _find_free(self, eq):
"""returns a list of free variables in the equation
which can be easily extracted"""
# adds = list(eq.atoms(sy.Add))
const_to_ones = {const: 1 for const in self.constants}
eqq = eq.subs(const_to_ones)
# muls = list(eq.atoms(sy.Mul))
# mulns = list((eq*(-1)).atoms(sy.Mul))
# pows = list(eq.atoms(sy.Pow))
# muls = list(eqq.atoms(sy.Mul))
# mulns = list((eqq*(-1)).atoms(sy.Mul))
# pows = list(eqq.atoms(sy.Pow))
# print "eq:", eq
# # print "adds:", adds
# print "muls:", muls
# print "mulns:", mulns
# print "pows:", pows
# # print
# break equation into bricks
bricks = map(eqq.atoms, [sy.Mul, sy.Pow]) + [(eqq*(-1)).atoms(sy.Mul)]
bricks = sum(map(list, bricks), [])
# print "bricks:\n", bricks
# print "(muls + mulns + pows):\n", (muls + mulns + pows)
# print
var_combs = [list(atom.free_symbols) for atom in bricks]
unique_combs = {sum(map(ord, str(x))): x for x in var_combs}.values()
# print "unique_combs:", unique_combs
occurances = {var: 0 for var in self.variables}
const_set = set(self.constants)
singles = set()
for comb in unique_combs:
for var in comb: occurances[var] += 1
# if len(comb) == 1: singles.append(comb.pop())
if len(comb) == 1: singles.add(comb.pop())
# print "singles:", singles
free = []
for var in singles:
if occurances[var] == 1: free.append(var)
# print "occurances:", occurances
return list(set(free) - set(self.constants))
def _adopt(self, res, nonnumeric, extended):
for var, eq in res.items():
# res[var] = eq.subs(extended, simultaneous=True).evalf()
res[var] = eq.subs(extended)
try:
f = float(res[var])
extended[var] = f
del nonnumeric[var]
except:
# nonnumeric[var] = eq
pass
def inspect(self):
variables = [ list(eq.atoms(sy.Symbol)) for eq in self.equations.values() ]
self.variables = set(sum(variables, []))
self.long_varname = max(map(len, [str(var) for var in self.variables]))
self._cleanup_constants()
self.free_list = self._extract_sparse(deep=True)
self._register_variables()
self._make_summary(self.verbose)
def shrink(self, simplify=False):
# self.equations_tmp = copy.deepcopy(self.equations)
self._copy_equations()
if self.verbose:
print _info_msg['shrinking']
extreme = len(self.sparse)
_shrinks = [extreme]
for i in range(extreme):
if self.verbose:
print "Iteration %d:" %i
self._compress()
size = len(self.sparse)
_shrinks.append(size)
if (_shrinks[i+1] == _shrinks[i]) or (size == 0):
break
if self.verbose:
print _info_msg['shrinked'] % (extreme - len(self.sparse))
self._sort_equations()
if simplify: self.try_eliminate()
self.inspect()
# conv = abs(self.redundnum - self.shrink_conv)
# if self.redundnum > 0 and conv!=0:
# self.shrink_conv = self.redundnum
# self.shrink()
def adopt(self, adoptdict, constdict={}, verbose=False, verify_subs=False):
"""Adopts solution and calculates values of all redundant variables."""
# res = {}
# print "extended before:", extended
# nn = len(res)
if None in adoptdict.values():
return
else:
nonnumeric = copy.deepcopy(self.redundant)
extended = dict(adoptdict, **constdict)
res = copy.deepcopy(self.redundant)
for i in range(len(res)):
self._adopt(res, nonnumeric, extended)
if len(nonnumeric) == 0:
break
# print "extended after:", extended
# for var, rr in res.items():
# print "[%s]:" %str(var), rr
res_dict = Solution(adoptdict, res)
if verbose: print res_dict
return res_dict
def subs_safe(self, subs_dict, verbose=False):
equations = {i: eq.subs(subs_dict, simultaneous=True)
for i, eq in self.equations.items()}
if verbose: self.print_equations(equations)
return equations
def subs(self, subs_dict, simplify=False, constants=[]):
# for i, var in subs_dict:
# print "self.constants before:", self.constants
self.constants += constants
# t = 0
for var in subs_dict:
if var in self.constants:
i = self.constants.index(var)
# print "i, var:", i, var
self.constants.pop(i)
# t += 1
# if type(var) is sy.Symbol:
# self.constants.append(var)
# print "self.constants after:", self.constants
sp = lambda eq: sy.simplify(eq) if simplify else eq
self.equations = {i: sp(eq.subs(subs_dict, simultaneous=True))
for i, eq in self.equations.items()}
self.redundant = {var: eq.subs(subs_dict, simultaneous=True)
for var, eq in self.redundant.items()}
self.sparse = {i: (var, eq.subs(subs_dict, simultaneous=True))
for i, (var, eq) in self.sparse.items()}
self.inspect()
# print "self.variables:", self.variables
new_variables = []
for eq in subs_dict.values():
try:
new_variables += eq.free_symbols
except:
pass
self._register_variables(new_variables)
def try_eliminate(self):
"""can be used to eliminate unnecessary variables"""
self.equations = {i: sy.simplify(sy.together(eq))
for i, eq in self.equations.items()}
def edit_sparse(self, edit_dict, verbose=False):
for i, var in edit_dict.items():
var_, eq = self.sparse[i]
free_vars = self._find_free(eq)
if var in free_vars:
self.sparse_edit = {i: var}
self.sparse[i] = (var, self.equations[i])
self.free_list = list(set(self.free_list) - set([var_])) + [var]
# self._make_summary()
else:
raise ValueError(_error_msg[1] % (i, str(var)))
self._make_summary(False)
if verbose and len(self.sparse) > 0:
self.print_sparse()
def replace_solution(self, replace_dict):
for var, solnum in replace_dict.items():
sol = self.multisols[var][solnum]
self.redundant[var] = sol
self.equations = {i: self.equations_tmp[self.tempmap[i]]
for i in self.equations}
self._update()
self.print_replacementset()
self.inspect()
def print_sparse(self):
msg = 'Sparse equations:\n'
print (msg + '-'*len(msg))
pvar = 'redundant: %s'
# w = max(map(len, [ str(var) for var in self.variables ])) + len(p)
w = self.long_varname + len(pvar)
for i, (var, eq) in self.sparse.items():
rs = pvar % str(var)
w_ = w - len(rs) + 2
print rs + ' ' * w_ + '[%d]:' % i, eq
print
def print_replacementset(self):
msg = "Replacement set:\n"
print (msg + '-'*len(msg))
for var, eq in self.redundant.items():
svar = str(var)
pvar = '[%s]:' + ' '*(self.long_varname - len(svar) + 1)
print pvar % svar, eq
print
def print_equations(self, equations=None):
if equations is None:
equations = self.equations
msg = "\nEquations:\n"
print (msg + '-'*len(msg))
for i, eq in equations.items():
# print "[%d]: " % (i+1), eq
print "[%d]: " % i, eq
print
Allow adopt() method in System class to accept array of results.
import math, pickle, warnings, time, json, copy
import sympy as sy
from solution import Solution
_error_msg = {
0: ("System expects as maximum as 2 parameters which represent lists "
"of objects and constraints"),
1: "Equation %d cannot be solved against variable %s.",
2: "All objects must have 'equations' attribute.",
3: "At least one equation is required to construct system of equations.",
4: "Arguments must be of type list or tuple."
}
_warn_msg = {
0: ("Variable '%s' has been chosen as a redundant in equation #%d.\n"
"Call edit_sparse() method to replace it with another free variable "
"in the equation."),
1: "More then 1 solution found while replacing redundant variables.",
2: "Keyword argument '%s' is not a valid argument and will be ignored."
}
_info_msg = {
'head': ("=======================================\n"
"total number of variables: %d\n"
"total number of equations: %d\n"
"degrees of freedom: %d\n"
"=======================================\n"),
'independent': "%d independent variables found: ",
'redundant': "%d redundant variables found: ",
'constants': "%d variables marked as constants: ",
'mult': "Redundant variable '%s' has multiple solutions:\n",
'mult_': ("\nBy default last solution has been chosen. "
"Call replace_solution() method to specify other solution.\n"),
'shrinked': "System has been shrinked by %d equations.\n",
'shrinking': "\nShrinking system of equations...\n",
'try_shrink': 'Try shrinking the system by calling shrink() method.',
}
class System(object):
"""
Represents a system of equations.
System of equations can be constructed from a list of equations, list of
objects that have attribute 'equations' (which is a list of equations) or both.
Equations must be a SymPy objects of type Add, Mul or Pow.
Parameters
----------
objects: list of objects that have attribute 'equations'.
constraints: list of equations.
subs: dictionary of variable substitutions.
constants: list of constants in equations.
verbose: controls the output - 0: silent mode; 1 or True: basic output,
2: extended output.
Examples
--------
TODO
"""
# def __init__(self, objects=[], constraints=[], subs={}, constants=[], verbose=True):
def __init__(self, *args, **kwargs):
valid_kwargs = ['subs', 'constants', 'verbose']
# check_kwargs = [kwarg in valid_kwargs for kwarg in kwargs]
for kwarg in kwargs:
if kwarg not in valid_kwargs:
warnings.warn(_warn_msg[2] % kwarg)
subs = kwargs.get(valid_kwargs[0], {})
self.constants = kwargs.get(valid_kwargs[1], [])
self.verbose = kwargs.get(valid_kwargs[2], True)
if len(args) > 2:
raise ValueError(_error_msg[0])
objects = []
constraints = []
oind = None
cind = None
constr_types = [sy.Add, sy.Mul, sy.Pow]
for i, arg in enumerate(args):
if type(arg) not in [list, tuple]:
raise TypeError(_error_msg[4])
if len(arg) > 0:
check = [type(unit) in constr_types for unit in arg]
if not check.count(False):
cind = i
else:
try:
check_obj = [obj.equations for obj in objects]
oind = i
except:
raise ValueError(_error_msg[2])
if oind is not None: objects = list(args[oind])
if cind is not None: constraints = list(args[cind])
# print "objects:", objects
# print "constraints:", constraints
if len(objects + constraints) == 0:
raise ValueError(_error_msg[3])
init_dictattr = ['redundant', 'sparse', 'multisols', 'sparse_edit']
self.__dict__.update((name, {}) for name in init_dictattr)
self.shrink_conv = 0
self.long_varname = 0
try:
eq_list = sum([obj.equations.values() for obj in objects], constraints)
except:
eq_list = sum([obj.equations for obj in objects], constraints)
self.equations = {i: eq.subs(subs) for i, eq in enumerate(eq_list)}
self._copy_equations()
# for i, eq in enumerate(eq_list):
# self.equations[i] = eq.subs(self.subs)
# varlist = list(eq.atoms(sy.Symbol))
# self.variables += varlist
self.inspect()
def _copy_equations(self):
self.equations_tmp = copy.deepcopy(self.equations)
self.tempmap = {i: i for i in self.equations}
def _register_variables(self, add_varlist=[]):
self.__dict__.update((str(v), v) for v in self.variables)
# self.__dict__.update((str(v), v) for v, s in self.sparse.values())
self.__dict__.update((str(v), v) for v in self.redundant)
self.__dict__.update((str(v), v) for v in self.constants)
self.__dict__.update((str(v), v) for v in add_varlist)
# print "self.__dict__:"
# for v in self.__dict__:
# print v
# print
def _cleanup_constants(self):
const_set = set(self.constants)
muddy_constants = const_set - self.variables
self.constants = list(const_set - muddy_constants)
def _sort_equations(self):
# self.equations = {i: eq for i, eq in enumerate(self.equations.values())}
# self.equations = {}
equations = {}
for j, (i, eq) in enumerate(self.equations.items()):
equations[j] = eq
self.tempmap[j] = i
self.equations = equations
# self.tempmap
def _add_sparse(self, i, var, free_list):
eq = self.equations[i]
self.sparse[i] = (var, eq)
free_list.append(var)
def _delve_nonsparse(self, nonsparse_map, free_list):
equations_ = copy.deepcopy(self.equations)
for i, free_vars in nonsparse_map.items():
redundant = None
eq_temp = equations_[i]
# retrieve set of variables in all equations except the current one
del equations_[i]
variables_set = set(sum((list(eq.free_symbols) for eq in equations_.values()), []))
equations_[i] = eq_temp
# select variables that are not present in free_list
del nonsparse_map[i]
vars_selected = []
free_set = set(free_list)
for var in free_vars:
if var not in free_set:
vars_selected.append(var)
if len(vars_selected) == 1:
redundant = vars_selected[0]
elif len(vars_selected) > 1:
# select unique variables that are not present in any other equation
var_stack = set(sum(nonsparse_map.values(), list(variables_set)))
vars_selected_ = []
for var in free_vars:
if var not in var_stack:
vars_selected_.append(var)
if len(vars_selected_) == 0:
redundant = vars_selected[0]
# warnmsg = _warn_msg[0] % (str(var), i)
warnmsg = _warn_msg[0] % (str(redundant), i)
warnings.warn(warnmsg)
elif len(vars_selected_) == 1:
redundant = vars_selected_[0]
if redundant is not None:
self._add_sparse(i, redundant, free_list)
nonsparse_map[i] = free_vars
def _extract_sparse(self, deep=False):
free_list = []
nonsparse_map = {}
for i, eq in self.equations.items():
free_vars = self._find_free(eq)
mapped = [ v[0] for v in self.sparse.values() ]
if len(free_vars) == 1 and free_vars[0] not in mapped:
self._add_sparse(i, free_vars[0], free_list)
elif len(free_vars) > 1:
nonsparse_map[i] = free_vars
if deep:
self._delve_nonsparse(nonsparse_map, free_list)
return list(set(free_list))
def _make_summary(self, verbose):
self.dof = len(self.variables) - len(self.equations) - len(self.constants)
self.independent = self.variables - set(self.free_list + self.constants)
redundnum = len(self.free_list)
if verbose:
varnum = len(self.variables) - len(self.constants)
info_head = _info_msg['head'] % (varnum, len(self.equations), self.dof)
info_independent = _info_msg['independent'] % len(self.independent)
info_redundant = _info_msg['redundant'] % redundnum
info_constants = _info_msg['constants'] % len(self.constants)
summary = (info_head + info_independent + str(list(self.independent)) +
'\n' + info_redundant + str(self.free_list) + '\n' +
info_constants + str(list(self.constants)))
print summary
self.print_equations()
if verbose == 2 and len(self.sparse) > 0:
self.print_sparse()
if (redundnum > 0) and (len(self.equations) > 1):
print _info_msg['try_shrink']
def _solve(self, eq, var):
c = eq.coeff(var)
extracted = eq - c*var
if var not in list(extracted.free_symbols):
return extracted/abs(c)
else:
warnings.filterwarnings("always")
sol = sy.solve(eq, var)
if len(sol) > 1:
self.multisols[var] = sol
warnings.warn(_warn_msg[1])
print _info_msg['mult'] % str(var), sol, _info_msg['mult_']
return sol[-1]
# return sy.solve(eq, var)
def _compress(self):
if self.verbose:
print "Compressing..."
# find equations with one free dependent variable
replaced = []
min_varset_len = 0
varsets = {}
j = 0
for i, (var, eq) in self.sparse.items():
varset = eq.free_symbols - set(self.constants + [var]) - self.independent
varset_len = len(varset)
min_varset_len = varset_len if j == 0 else min(varset_len, min_varset_len)
varsets[i] = varset
j += 1
for i, (var, eq) in self.sparse.items():
varset = varsets[i]
if (len(varset) <= min_varset_len) and (len(self.equations) > 1):
# if len(varset) == 0:
# if (len(varset) <= min_varset_len):
self.redundant[var] = self._solve(eq, var)
replaced.append(var)
del self.equations[i]
del self.sparse[i]
self._update()
if self.verbose:
print "Replaced variables:", replaced, "\n"
# print
if self.verbose == 2:
self.print_sparse()
self.print_replacementset()
def _update(self):
# self.equations = self.equations_tmp
# update system of equations
self.equations = {i: eq.subs(self.redundant)
for i, eq in self.equations.items()}
# update sparse equations
self.sparse = {i: (var, eq.subs(self.redundant))
for i, (var, eq) in self.sparse.items()}
# find new sparse equations
self._extract_sparse()
def _find_free(self, eq):
"""returns a list of free variables in the equation
which can be easily extracted"""
# adds = list(eq.atoms(sy.Add))
const_to_ones = {const: 1 for const in self.constants}
eqq = eq.subs(const_to_ones)
# muls = list(eq.atoms(sy.Mul))
# mulns = list((eq*(-1)).atoms(sy.Mul))
# pows = list(eq.atoms(sy.Pow))
# muls = list(eqq.atoms(sy.Mul))
# mulns = list((eqq*(-1)).atoms(sy.Mul))
# pows = list(eqq.atoms(sy.Pow))
# print "eq:", eq
# # print "adds:", adds
# print "muls:", muls
# print "mulns:", mulns
# print "pows:", pows
# # print
# break equation into bricks
bricks = map(eqq.atoms, [sy.Mul, sy.Pow]) + [(eqq*(-1)).atoms(sy.Mul)]
bricks = sum(map(list, bricks), [])
# print "bricks:\n", bricks
# print "(muls + mulns + pows):\n", (muls + mulns + pows)
# print
var_combs = [list(atom.free_symbols) for atom in bricks]
unique_combs = {sum(map(ord, str(x))): x for x in var_combs}.values()
# print "unique_combs:", unique_combs
occurances = {var: 0 for var in self.variables}
const_set = set(self.constants)
singles = set()
for comb in unique_combs:
for var in comb: occurances[var] += 1
# if len(comb) == 1: singles.append(comb.pop())
if len(comb) == 1: singles.add(comb.pop())
# print "singles:", singles
free = []
for var in singles:
if occurances[var] == 1: free.append(var)
# print "occurances:", occurances
return list(set(free) - set(self.constants))
def _adopt(self, res, nonnumeric, extended):
for var, eq in res.items():
# res[var] = eq.subs(extended, simultaneous=True).evalf()
res[var] = eq.subs(extended)
try:
f = float(res[var])
extended[var] = f
# del nonnumeric[var]
nonnumeric.pop()
except:
# nonnumeric[var] = eq
pass
def inspect(self):
variables = [ list(eq.atoms(sy.Symbol)) for eq in self.equations.values() ]
self.variables = set(sum(variables, []))
self.long_varname = max(map(len, [str(var) for var in self.variables]))
self._cleanup_constants()
self.free_list = self._extract_sparse(deep=True)
self._register_variables()
self._make_summary(self.verbose)
def shrink(self, simplify=False):
# self.equations_tmp = copy.deepcopy(self.equations)
self._copy_equations()
if self.verbose:
print _info_msg['shrinking']
extreme = len(self.sparse)
_shrinks = [extreme]
for i in range(extreme):
if self.verbose:
print "Iteration %d:" %i
self._compress()
size = len(self.sparse)
_shrinks.append(size)
if (_shrinks[i+1] == _shrinks[i]) or (size == 0):
break
if self.verbose:
print _info_msg['shrinked'] % (extreme - len(self.sparse))
self._sort_equations()
if simplify: self.try_eliminate()
self.inspect()
# conv = abs(self.redundnum - self.shrink_conv)
# if self.redundnum > 0 and conv!=0:
# self.shrink_conv = self.redundnum
# self.shrink()
def adopt(self, result, constdict={}, verbose=False, verify_subs=False):
"""Adopts solution and calculates values of all redundant variables."""
if type(result) in [list, tuple]:
reslist = result
elif type(result) in (Solution, dict):
reslist = [result]
else:
raise ValueError("adoptdict type must represent solution or list of solutions")
adopted = []
for res in reslist:
if None in res.values():
nonedict = {var: None for var in self.redundant}
res_dict = Solution(res, nonedict)
# pass
else:
# nonnumeric = copy.deepcopy(self.redundant)
nonnumeric = range(len(self.redundant))
# extended = dict(res, **constdict)
extended = res
tosolve = copy.deepcopy(self.redundant)
for i in range(len(tosolve)):
self._adopt(tosolve, nonnumeric, extended)
if len(nonnumeric) == 0:
break
# print "extended after:", extended
# for var, rr in tosolve.items():
# print "[%s]:" %str(var), rr
res_dict = Solution(res, tosolve)
adopted.append(res_dict)
if verbose: print res_dict
# return res_dict
return adopted
def subs_safe(self, subs_dict, verbose=False):
equations = {i: eq.subs(subs_dict, simultaneous=True)
for i, eq in self.equations.items()}
if verbose: self.print_equations(equations)
return equations
def subs(self, subs_dict, simplify=False, constants=[]):
# for i, var in subs_dict:
# print "self.constants before:", self.constants
self.constants += constants
# t = 0
for var in subs_dict:
if var in self.constants:
i = self.constants.index(var)
# print "i, var:", i, var
self.constants.pop(i)
# t += 1
# if type(var) is sy.Symbol:
# self.constants.append(var)
# print "self.constants after:", self.constants
sp = lambda eq: sy.simplify(eq) if simplify else eq
self.equations = {i: sp(eq.subs(subs_dict, simultaneous=True))
for i, eq in self.equations.items()}
self.redundant = {var: eq.subs(subs_dict, simultaneous=True)
for var, eq in self.redundant.items()}
self.sparse = {i: (var, eq.subs(subs_dict, simultaneous=True))
for i, (var, eq) in self.sparse.items()}
self.inspect()
# print "self.variables:", self.variables
new_variables = []
for eq in subs_dict.values():
try:
new_variables += eq.free_symbols
except:
pass
self._register_variables(new_variables)
def try_eliminate(self):
"""can be used to eliminate unnecessary variables"""
self.equations = {i: sy.simplify(sy.together(eq))
for i, eq in self.equations.items()}
def edit_sparse(self, edit_dict, verbose=False):
for i, var in edit_dict.items():
var_, eq = self.sparse[i]
free_vars = self._find_free(eq)
if var in free_vars:
self.sparse_edit = {i: var}
self.sparse[i] = (var, self.equations[i])
self.free_list = list(set(self.free_list) - set([var_])) + [var]
# self._make_summary()
else:
raise ValueError(_error_msg[1] % (i, str(var)))
self._make_summary(False)
if verbose and len(self.sparse) > 0:
self.print_sparse()
def replace_solution(self, replace_dict):
for var, solnum in replace_dict.items():
sol = self.multisols[var][solnum]
self.redundant[var] = sol
self.equations = {i: self.equations_tmp[self.tempmap[i]]
for i in self.equations}
self._update()
self.print_replacementset()
self.inspect()
def print_sparse(self):
msg = 'Sparse equations:\n'
print (msg + '-'*len(msg))
pvar = 'redundant: %s'
# w = max(map(len, [ str(var) for var in self.variables ])) + len(p)
w = self.long_varname + len(pvar)
for i, (var, eq) in self.sparse.items():
rs = pvar % str(var)
w_ = w - len(rs) + 2
print rs + ' ' * w_ + '[%d]:' % i, eq
print
def print_replacementset(self):
msg = "Replacement set:\n"
print (msg + '-'*len(msg))
for var, eq in self.redundant.items():
svar = str(var)
pvar = '[%s]:' + ' '*(self.long_varname - len(svar) + 1)
print pvar % svar, eq
print
def print_equations(self, equations=None):
if equations is None:
equations = self.equations
msg = "\nEquations:\n"
print (msg + '-'*len(msg))
for i, eq in equations.items():
# print "[%d]: " % (i+1), eq
print "[%d]: " % i, eq
print
|
import logging
import os
import shutil
import subprocess
from scripts.lib.zulip_tools import run, run_as_root, ENDC, WARNING
from scripts.lib.hash_reqs import expand_reqs
from typing import List, Optional, Tuple, Set
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VENV_CACHE_PATH = "/srv/zulip-venv-cache"
if 'TRAVIS' in os.environ:
# In Travis CI, we don't have root access
VENV_CACHE_PATH = "/home/travis/zulip-venv-cache"
VENV_DEPENDENCIES = [
"build-essential",
"libffi-dev",
"libfreetype6-dev", # Needed for image types with Pillow
"zlib1g-dev", # Needed to handle compressed PNGs with Pillow
"libjpeg-dev", # Needed to handle JPEGs with Pillow
"libldap2-dev",
"libmemcached-dev",
"python3-dev", # Needed to install typed-ast dependency of mypy
"python-dev",
"python3-pip",
"python-pip",
"virtualenv",
"python3-six",
"python-six",
"libxml2-dev", # Used for installing talon and python-xmlsec
"libxslt1-dev", # Used for installing talon
"libpq-dev", # Needed by psycopg2
"libssl-dev", # Needed to build pycurl and other libraries
"libmagic1", # Used for install python-magic
# Needed by python-xmlsec:
"libxmlsec1-dev",
"libxmlsec1-openssl",
# This is technically a node dependency, but we add it here
# because we don't have another place that we install apt packages
# on upgrade of a production server, and it's not worth adding
# another call to `apt install` for.
"jq", # Used by scripts/lib/install-node to check yarn version
]
COMMON_YUM_VENV_DEPENDENCIES = [
"libffi-devel",
"freetype-devel",
"zlib-devel",
"libjpeg-turbo-devel",
"openldap-devel",
"libmemcached-devel",
"python-devel",
"python2-pip",
"python-six",
# Needed by python-xmlsec:
"gcc"
"python3-devel",
"libxml2-devel",
"xmlsec1-devel",
"xmlsec1-openssl-devel",
"libtool-ltdl-devel",
"libxslt-devel",
"postgresql-libs", # libpq-dev on apt
"openssl-devel",
"jq",
]
REDHAT_VENV_DEPENDENCIES = COMMON_YUM_VENV_DEPENDENCIES + [
"python36-devel",
"python36-six",
"python-virtualenv",
]
FEDORA_VENV_DEPENDENCIES = COMMON_YUM_VENV_DEPENDENCIES + [
"python3-pip",
"python3-six",
"virtualenv", # see https://unix.stackexchange.com/questions/27877/install-virtualenv-on-fedora-16
]
THUMBOR_VENV_DEPENDENCIES = [
"libcurl4-openssl-dev",
"libjpeg-dev",
"zlib1g-dev",
"libfreetype6-dev",
"libpng-dev",
"gifsicle",
]
YUM_THUMBOR_VENV_DEPENDENCIES = [
"libcurl-devel",
"libjpeg-turbo-devel",
"zlib-devel",
"freetype-devel",
"libpng-devel",
"gifsicle",
]
def install_venv_deps(pip, requirements_file):
# type: (str, str) -> None
pip_requirements = os.path.join(ZULIP_PATH, "requirements", "pip.txt")
run([pip, "install", "--force-reinstall", "--requirement", pip_requirements])
run([pip, "install", "--no-deps", "--requirement", requirements_file])
def get_index_filename(venv_path):
# type: (str) -> str
return os.path.join(venv_path, 'package_index')
def get_package_names(requirements_file):
# type: (str) -> List[str]
packages = expand_reqs(requirements_file)
cleaned = []
operators = ['~=', '==', '!=', '<', '>']
for package in packages:
if package.startswith("git+https://") and '#egg=' in package:
split_package = package.split("#egg=")
if len(split_package) != 2:
raise Exception("Unexpected duplicate #egg in package %s" % (package,))
# Extract the package name from Git requirements entries
package = split_package[1]
for operator in operators:
if operator in package:
package = package.split(operator)[0]
package = package.strip()
if package:
cleaned.append(package.lower())
return sorted(cleaned)
def create_requirements_index_file(venv_path, requirements_file):
# type: (str, str) -> str
"""
Creates a file, called package_index, in the virtual environment
directory that contains all the PIP packages installed in the
virtual environment. This file is used to determine the packages
that can be copied to a new virtual environment.
"""
index_filename = get_index_filename(venv_path)
packages = get_package_names(requirements_file)
with open(index_filename, 'w') as writer:
writer.write('\n'.join(packages))
writer.write('\n')
return index_filename
def get_venv_packages(venv_path):
# type: (str) -> Set[str]
"""
Returns the packages installed in the virtual environment using the
package index file.
"""
with open(get_index_filename(venv_path)) as reader:
return set(p.strip() for p in reader.read().split('\n') if p.strip())
def try_to_copy_venv(venv_path, new_packages):
# type: (str, Set[str]) -> bool
"""
Tries to copy packages from an old virtual environment in the cache
to the new virtual environment. The algorithm works as follows:
1. Find a virtual environment, v, from the cache that has the
highest overlap with the new requirements such that:
a. The new requirements only add to the packages of v.
b. The new requirements only upgrade packages of v.
2. Copy the contents of v to the new virtual environment using
virtualenv-clone.
3. Delete all .pyc files in the new virtual environment.
"""
if not os.path.exists(VENV_CACHE_PATH):
return False
venv_name = os.path.basename(venv_path)
overlaps = [] # type: List[Tuple[int, str, Set[str]]]
old_packages = set() # type: Set[str]
for sha1sum in os.listdir(VENV_CACHE_PATH):
curr_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, venv_name)
if (curr_venv_path == venv_path or
not os.path.exists(get_index_filename(curr_venv_path))):
continue
old_packages = get_venv_packages(curr_venv_path)
# We only consider using using old virtualenvs that only
# contain packages that we want in our new virtualenv.
if not (old_packages - new_packages):
overlap = new_packages & old_packages
overlaps.append((len(overlap), curr_venv_path, overlap))
target_log = get_logfile_name(venv_path)
source_venv_path = None
if overlaps:
# Here, we select the old virtualenv with the largest overlap
overlaps = sorted(overlaps)
_, source_venv_path, copied_packages = overlaps[-1]
print('Copying packages from {}'.format(source_venv_path))
clone_ve = "{}/bin/virtualenv-clone".format(source_venv_path)
cmd = [clone_ve, source_venv_path, venv_path]
try:
# TODO: We can probably remove this in a few months, now
# that we can expect that virtualenv-clone is present in
# all of our recent virtualenvs.
run_as_root(cmd)
except subprocess.CalledProcessError:
# Virtualenv-clone is either not installed or threw an
# error. Just return False: making a new venv is safe.
logging.warning("Error cloning virtualenv %s" % (source_venv_path,))
return False
# virtualenv-clone, unfortunately, copies the success stamp,
# which means if the upcoming `pip install` phase were to
# fail, we'd end up with a broken half-provisioned virtualenv
# that's incorrectly tagged as properly provisioned. The
# right fix is to use
# https://github.com/edwardgeorge/virtualenv-clone/pull/38,
# but this rm is almost as good.
success_stamp_path = os.path.join(venv_path, 'success-stamp')
run_as_root(["rm", "-f", success_stamp_path])
run_as_root(["chown", "-R",
"{}:{}".format(os.getuid(), os.getgid()), venv_path])
source_log = get_logfile_name(source_venv_path)
copy_parent_log(source_log, target_log)
create_log_entry(target_log, source_venv_path, copied_packages,
new_packages - copied_packages)
return True
return False
def get_logfile_name(venv_path):
# type: (str) -> str
return "{}/setup-venv.log".format(venv_path)
def create_log_entry(target_log, parent, copied_packages, new_packages):
# type: (str, str, Set[str], Set[str]) -> None
venv_path = os.path.dirname(target_log)
with open(target_log, 'a') as writer:
writer.write("{}\n".format(venv_path))
if copied_packages:
writer.write(
"Copied from {}:\n".format(parent))
writer.write("\n".join('- {}'.format(p) for p in sorted(copied_packages)))
writer.write("\n")
writer.write("New packages:\n")
writer.write("\n".join('- {}'.format(p) for p in sorted(new_packages)))
writer.write("\n\n")
def copy_parent_log(source_log, target_log):
# type: (str, str) -> None
if os.path.exists(source_log):
shutil.copyfile(source_log, target_log)
def do_patch_activate_script(venv_path):
# type: (str) -> None
"""
Patches the bin/activate script so that the value of the environment variable VIRTUAL_ENV
is set to venv_path during the script's execution whenever it is sourced.
"""
# venv_path should be what we want to have in VIRTUAL_ENV after patching
script_path = os.path.join(venv_path, "bin", "activate")
with open(script_path, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if line.startswith('VIRTUAL_ENV='):
lines[i] = 'VIRTUAL_ENV="%s"\n' % (venv_path,)
with open(script_path, 'w') as f:
f.write("".join(lines))
def setup_virtualenv(target_venv_path, requirements_file, virtualenv_args=None, patch_activate_script=False):
# type: (Optional[str], str, Optional[List[str]], bool) -> str
# Check if a cached version already exists
path = os.path.join(ZULIP_PATH, 'scripts', 'lib', 'hash_reqs.py')
output = subprocess.check_output([path, requirements_file], universal_newlines=True)
sha1sum = output.split()[0]
if target_venv_path is None:
cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, 'venv')
else:
cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, os.path.basename(target_venv_path))
success_stamp = os.path.join(cached_venv_path, "success-stamp")
if not os.path.exists(success_stamp):
do_setup_virtualenv(cached_venv_path, requirements_file, virtualenv_args or [])
with open(success_stamp, 'w') as f:
f.close()
print("Using cached Python venv from %s" % (cached_venv_path,))
if target_venv_path is not None:
run_as_root(["ln", "-nsf", cached_venv_path, target_venv_path])
if patch_activate_script:
do_patch_activate_script(target_venv_path)
return cached_venv_path
def add_cert_to_pipconf():
# type: () -> None
conffile = os.path.expanduser("~/.pip/pip.conf")
confdir = os.path.expanduser("~/.pip/")
os.makedirs(confdir, exist_ok=True)
run(["crudini", "--set", conffile, "global", "cert", os.environ["CUSTOM_CA_CERTIFICATES"]])
def do_setup_virtualenv(venv_path, requirements_file, virtualenv_args):
# type: (str, str, List[str]) -> None
# Setup Python virtualenv
new_packages = set(get_package_names(requirements_file))
run_as_root(["rm", "-rf", venv_path])
if not try_to_copy_venv(venv_path, new_packages):
# Create new virtualenv.
run_as_root(["mkdir", "-p", venv_path])
run_as_root(["virtualenv"] + virtualenv_args + [venv_path])
run_as_root(["chown", "-R",
"{}:{}".format(os.getuid(), os.getgid()), venv_path])
create_log_entry(get_logfile_name(venv_path), "", set(), new_packages)
create_requirements_index_file(venv_path, requirements_file)
pip = os.path.join(venv_path, "bin", "pip")
# use custom certificate if needed
if os.environ.get('CUSTOM_CA_CERTIFICATES'):
print("Configuring pip to use custom CA certificates...")
add_cert_to_pipconf()
try:
install_venv_deps(pip, requirements_file)
except subprocess.CalledProcessError:
# Might be a failure due to network connection issues. Retrying...
print(WARNING + "`pip install` failed; retrying..." + ENDC)
install_venv_deps(pip, requirements_file)
run_as_root(["chmod", "-R", "a+rX", venv_path])
setup_venv: Add pkg-config to VENV_DEPENDENCIES.
This is needed on at least Debian 10, otherwise xmlsec fails to
install: `Could not find xmlsec1 config. Are libxmlsec1-dev and
pkg-config installed?`
Also remove libxmlsec1-openssl, which libxmlsec1-dev already depends.
(No changes are needed on RHEL, where libxml2-devel and xmlsec1-devel
already declare a requirement on /usr/bin/pkg-config.)
Signed-off-by: Anders Kaseorg <dfdb7392591db597bc41cf266a9c3bc12a2706e5@zulipchat.com>
import logging
import os
import shutil
import subprocess
from scripts.lib.zulip_tools import run, run_as_root, ENDC, WARNING
from scripts.lib.hash_reqs import expand_reqs
from typing import List, Optional, Tuple, Set
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VENV_CACHE_PATH = "/srv/zulip-venv-cache"
if 'TRAVIS' in os.environ:
# In Travis CI, we don't have root access
VENV_CACHE_PATH = "/home/travis/zulip-venv-cache"
VENV_DEPENDENCIES = [
"build-essential",
"libffi-dev",
"libfreetype6-dev", # Needed for image types with Pillow
"zlib1g-dev", # Needed to handle compressed PNGs with Pillow
"libjpeg-dev", # Needed to handle JPEGs with Pillow
"libldap2-dev",
"libmemcached-dev",
"python3-dev", # Needed to install typed-ast dependency of mypy
"python-dev",
"python3-pip",
"python-pip",
"virtualenv",
"python3-six",
"python-six",
"libxml2-dev", # Used for installing talon and python-xmlsec
"libxslt1-dev", # Used for installing talon
"libpq-dev", # Needed by psycopg2
"libssl-dev", # Needed to build pycurl and other libraries
"libmagic1", # Used for install python-magic
# Needed by python-xmlsec:
"libxmlsec1-dev",
"pkg-config",
# This is technically a node dependency, but we add it here
# because we don't have another place that we install apt packages
# on upgrade of a production server, and it's not worth adding
# another call to `apt install` for.
"jq", # Used by scripts/lib/install-node to check yarn version
]
COMMON_YUM_VENV_DEPENDENCIES = [
"libffi-devel",
"freetype-devel",
"zlib-devel",
"libjpeg-turbo-devel",
"openldap-devel",
"libmemcached-devel",
"python-devel",
"python2-pip",
"python-six",
# Needed by python-xmlsec:
"gcc"
"python3-devel",
"libxml2-devel",
"xmlsec1-devel",
"xmlsec1-openssl-devel",
"libtool-ltdl-devel",
"libxslt-devel",
"postgresql-libs", # libpq-dev on apt
"openssl-devel",
"jq",
]
REDHAT_VENV_DEPENDENCIES = COMMON_YUM_VENV_DEPENDENCIES + [
"python36-devel",
"python36-six",
"python-virtualenv",
]
FEDORA_VENV_DEPENDENCIES = COMMON_YUM_VENV_DEPENDENCIES + [
"python3-pip",
"python3-six",
"virtualenv", # see https://unix.stackexchange.com/questions/27877/install-virtualenv-on-fedora-16
]
THUMBOR_VENV_DEPENDENCIES = [
"libcurl4-openssl-dev",
"libjpeg-dev",
"zlib1g-dev",
"libfreetype6-dev",
"libpng-dev",
"gifsicle",
]
YUM_THUMBOR_VENV_DEPENDENCIES = [
"libcurl-devel",
"libjpeg-turbo-devel",
"zlib-devel",
"freetype-devel",
"libpng-devel",
"gifsicle",
]
def install_venv_deps(pip, requirements_file):
# type: (str, str) -> None
pip_requirements = os.path.join(ZULIP_PATH, "requirements", "pip.txt")
run([pip, "install", "--force-reinstall", "--requirement", pip_requirements])
run([pip, "install", "--no-deps", "--requirement", requirements_file])
def get_index_filename(venv_path):
# type: (str) -> str
return os.path.join(venv_path, 'package_index')
def get_package_names(requirements_file):
# type: (str) -> List[str]
packages = expand_reqs(requirements_file)
cleaned = []
operators = ['~=', '==', '!=', '<', '>']
for package in packages:
if package.startswith("git+https://") and '#egg=' in package:
split_package = package.split("#egg=")
if len(split_package) != 2:
raise Exception("Unexpected duplicate #egg in package %s" % (package,))
# Extract the package name from Git requirements entries
package = split_package[1]
for operator in operators:
if operator in package:
package = package.split(operator)[0]
package = package.strip()
if package:
cleaned.append(package.lower())
return sorted(cleaned)
def create_requirements_index_file(venv_path, requirements_file):
# type: (str, str) -> str
"""
Creates a file, called package_index, in the virtual environment
directory that contains all the PIP packages installed in the
virtual environment. This file is used to determine the packages
that can be copied to a new virtual environment.
"""
index_filename = get_index_filename(venv_path)
packages = get_package_names(requirements_file)
with open(index_filename, 'w') as writer:
writer.write('\n'.join(packages))
writer.write('\n')
return index_filename
def get_venv_packages(venv_path):
# type: (str) -> Set[str]
"""
Returns the packages installed in the virtual environment using the
package index file.
"""
with open(get_index_filename(venv_path)) as reader:
return set(p.strip() for p in reader.read().split('\n') if p.strip())
def try_to_copy_venv(venv_path, new_packages):
# type: (str, Set[str]) -> bool
"""
Tries to copy packages from an old virtual environment in the cache
to the new virtual environment. The algorithm works as follows:
1. Find a virtual environment, v, from the cache that has the
highest overlap with the new requirements such that:
a. The new requirements only add to the packages of v.
b. The new requirements only upgrade packages of v.
2. Copy the contents of v to the new virtual environment using
virtualenv-clone.
3. Delete all .pyc files in the new virtual environment.
"""
if not os.path.exists(VENV_CACHE_PATH):
return False
venv_name = os.path.basename(venv_path)
overlaps = [] # type: List[Tuple[int, str, Set[str]]]
old_packages = set() # type: Set[str]
for sha1sum in os.listdir(VENV_CACHE_PATH):
curr_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, venv_name)
if (curr_venv_path == venv_path or
not os.path.exists(get_index_filename(curr_venv_path))):
continue
old_packages = get_venv_packages(curr_venv_path)
# We only consider using using old virtualenvs that only
# contain packages that we want in our new virtualenv.
if not (old_packages - new_packages):
overlap = new_packages & old_packages
overlaps.append((len(overlap), curr_venv_path, overlap))
target_log = get_logfile_name(venv_path)
source_venv_path = None
if overlaps:
# Here, we select the old virtualenv with the largest overlap
overlaps = sorted(overlaps)
_, source_venv_path, copied_packages = overlaps[-1]
print('Copying packages from {}'.format(source_venv_path))
clone_ve = "{}/bin/virtualenv-clone".format(source_venv_path)
cmd = [clone_ve, source_venv_path, venv_path]
try:
# TODO: We can probably remove this in a few months, now
# that we can expect that virtualenv-clone is present in
# all of our recent virtualenvs.
run_as_root(cmd)
except subprocess.CalledProcessError:
# Virtualenv-clone is either not installed or threw an
# error. Just return False: making a new venv is safe.
logging.warning("Error cloning virtualenv %s" % (source_venv_path,))
return False
# virtualenv-clone, unfortunately, copies the success stamp,
# which means if the upcoming `pip install` phase were to
# fail, we'd end up with a broken half-provisioned virtualenv
# that's incorrectly tagged as properly provisioned. The
# right fix is to use
# https://github.com/edwardgeorge/virtualenv-clone/pull/38,
# but this rm is almost as good.
success_stamp_path = os.path.join(venv_path, 'success-stamp')
run_as_root(["rm", "-f", success_stamp_path])
run_as_root(["chown", "-R",
"{}:{}".format(os.getuid(), os.getgid()), venv_path])
source_log = get_logfile_name(source_venv_path)
copy_parent_log(source_log, target_log)
create_log_entry(target_log, source_venv_path, copied_packages,
new_packages - copied_packages)
return True
return False
def get_logfile_name(venv_path):
# type: (str) -> str
return "{}/setup-venv.log".format(venv_path)
def create_log_entry(target_log, parent, copied_packages, new_packages):
# type: (str, str, Set[str], Set[str]) -> None
venv_path = os.path.dirname(target_log)
with open(target_log, 'a') as writer:
writer.write("{}\n".format(venv_path))
if copied_packages:
writer.write(
"Copied from {}:\n".format(parent))
writer.write("\n".join('- {}'.format(p) for p in sorted(copied_packages)))
writer.write("\n")
writer.write("New packages:\n")
writer.write("\n".join('- {}'.format(p) for p in sorted(new_packages)))
writer.write("\n\n")
def copy_parent_log(source_log, target_log):
# type: (str, str) -> None
if os.path.exists(source_log):
shutil.copyfile(source_log, target_log)
def do_patch_activate_script(venv_path):
# type: (str) -> None
"""
Patches the bin/activate script so that the value of the environment variable VIRTUAL_ENV
is set to venv_path during the script's execution whenever it is sourced.
"""
# venv_path should be what we want to have in VIRTUAL_ENV after patching
script_path = os.path.join(venv_path, "bin", "activate")
with open(script_path, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if line.startswith('VIRTUAL_ENV='):
lines[i] = 'VIRTUAL_ENV="%s"\n' % (venv_path,)
with open(script_path, 'w') as f:
f.write("".join(lines))
def setup_virtualenv(target_venv_path, requirements_file, virtualenv_args=None, patch_activate_script=False):
# type: (Optional[str], str, Optional[List[str]], bool) -> str
# Check if a cached version already exists
path = os.path.join(ZULIP_PATH, 'scripts', 'lib', 'hash_reqs.py')
output = subprocess.check_output([path, requirements_file], universal_newlines=True)
sha1sum = output.split()[0]
if target_venv_path is None:
cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, 'venv')
else:
cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, os.path.basename(target_venv_path))
success_stamp = os.path.join(cached_venv_path, "success-stamp")
if not os.path.exists(success_stamp):
do_setup_virtualenv(cached_venv_path, requirements_file, virtualenv_args or [])
with open(success_stamp, 'w') as f:
f.close()
print("Using cached Python venv from %s" % (cached_venv_path,))
if target_venv_path is not None:
run_as_root(["ln", "-nsf", cached_venv_path, target_venv_path])
if patch_activate_script:
do_patch_activate_script(target_venv_path)
return cached_venv_path
def add_cert_to_pipconf():
# type: () -> None
conffile = os.path.expanduser("~/.pip/pip.conf")
confdir = os.path.expanduser("~/.pip/")
os.makedirs(confdir, exist_ok=True)
run(["crudini", "--set", conffile, "global", "cert", os.environ["CUSTOM_CA_CERTIFICATES"]])
def do_setup_virtualenv(venv_path, requirements_file, virtualenv_args):
# type: (str, str, List[str]) -> None
# Setup Python virtualenv
new_packages = set(get_package_names(requirements_file))
run_as_root(["rm", "-rf", venv_path])
if not try_to_copy_venv(venv_path, new_packages):
# Create new virtualenv.
run_as_root(["mkdir", "-p", venv_path])
run_as_root(["virtualenv"] + virtualenv_args + [venv_path])
run_as_root(["chown", "-R",
"{}:{}".format(os.getuid(), os.getgid()), venv_path])
create_log_entry(get_logfile_name(venv_path), "", set(), new_packages)
create_requirements_index_file(venv_path, requirements_file)
pip = os.path.join(venv_path, "bin", "pip")
# use custom certificate if needed
if os.environ.get('CUSTOM_CA_CERTIFICATES'):
print("Configuring pip to use custom CA certificates...")
add_cert_to_pipconf()
try:
install_venv_deps(pip, requirements_file)
except subprocess.CalledProcessError:
# Might be a failure due to network connection issues. Retrying...
print(WARNING + "`pip install` failed; retrying..." + ENDC)
install_venv_deps(pip, requirements_file)
run_as_root(["chmod", "-R", "a+rX", venv_path])
|
#!/usr/bin/env python3
import argparse
import collections
import csv
import sqlite3
import psycopg2
import psycopg2.extras
Point = collections.namedtuple('Point', ['x', 'y'])
sn_cur = None
sv_cur = None
seq_comp = None
seq_memb = None
seeds = {}
def reorder_pipe(pipe, seed):
"""Bounds: -24;30;-18;21"""
x_size = 56
y_size = 41
field = [[0]*y_size for _ in range(x_size)]
for pt in pipe:
field[pt.x][pt.y] = 1
output = [seed]
if field[seed.x][seed.y] == 0:
# bad stuff happened
print(f'Seed {seed} not found')
return output
prev = None
curr = seed
while True:
adiacence = [(curr.x+dx, curr.y+dy, field[curr.x+dx][curr.y+dy]) for dx,dy in [(1,0), (-1,0), (0,1), (0,-1)]]
s = sum(val[2] for val in adiacence)
if prev is None and s == 1:
# starting, good case
prev,curr = curr,Point(*([(x,y) for x,y,o in adiacence if o == 1][0]))
output.append(curr)
elif prev is not None and s == 2:
# continuing, good case
prev,curr = curr,Point(*([(x,y) for x,y,o in adiacence if o == 1 and not (x == prev.x and y == prev.y)][0]))
output.append(curr)
else:
# see if we've finished all the pipes
if len(output) != len(pipe):
print('Incomplete piping')
return output
def load_solution(sol_id, replace_old_score):
global seq_comp
global seq_memb
# get the level and the score
sn_cur.execute(r'''select internal_name, cycle_count, symbol_count, reactor_count
from solutions s, levels l
where s.level_id = l.level_id
and s.solution_id = %s;''', (sol_id,))
solution = sn_cur.fetchone()
if solution is None:
print(f'ERROR: Invalid solution id: {sol_id}')
exit()
int_level_name = solution['internal_name']
triplet = solution[1:]
# check if the level has a solution already
sv_cur.execute(r"SELECT count(*) from Level where id = ?", (int_level_name,))
count = sv_cur.fetchone()[0]
if count == 0:
# need to insert the level
print(f'Adding new solution to {int_level_name}')
sv_cur.execute(r"""INSERT INTO Level
VALUES (?, 1, 0, ?, ?, ?, ?, ?, ?)""", [int_level_name, *triplet, *triplet])
else:
# there's already a solution (possibly empty) in the file
if not replace_old_score:
print(f'There\'s already a solution to {int_level_name}, doing nothing')
return
print(f'Replacing previous solution to {int_level_name}')
sv_cur.execute(r"""UPDATE Level
SET passed = 1, mastered = 0, cycles = ?, symbols = ?, reactors = ?
WHERE id = ?""", triplet + [int_level_name])
# delete everything (Component, Member, Annotation, Pipe, UndoPtr, Undo) about the old solution
sv_cur.execute(r'SELECT rowid FROM Component WHERE level_id = ?', (int_level_name,))
comp_ids = [row[0] for row in sv_cur.fetchall()]
qm_list = ','.join('?'*len(comp_ids))
for table in ['Member', 'Annotation', 'Pipe']:
sv_cur.execute(fr'DELETE FROM {table} WHERE component_id in ({qm_list})', comp_ids)
for table in ['Component', 'UndoPtr', 'Undo']:
sv_cur.execute(fr'DELETE FROM {table} WHERE level_id = ?', (int_level_name,))
# get the reactors from db
sn_cur.execute(r""" select component_id, type, x, y
from components
where solution_id = %s
order by component_id;""", (sol_id,))
reactors = sn_cur.fetchall()
for reactor in reactors:
comp_id = reactor['component_id']
seq_comp += 1
sv_cur.execute(r"""INSERT INTO Component
VALUES (?, ?, ?, ?, ?, NULL, 200, 255, 0)""",
[seq_comp, int_level_name, reactor['type'], reactor['x'], reactor['y']])
# get all its pipes
sn_cur.execute(r"select * from pipes where component_id = %s;", (comp_id,))
pipes = ([], [])
for pipe_point in sn_cur:
pipes[pipe_point['output_id']].append(Point(pipe_point['x'], pipe_point['y']))
for out_id, pipe in enumerate(pipes):
if not pipe:
continue
reordered_pipe = reorder_pipe(pipe, seeds[(reactor['type'], out_id)])
for pipe_point in reordered_pipe:
sv_cur.execute(r"INSERT INTO Pipe VALUES (?, ?, ?, ?)", (seq_comp, out_id, pipe_point.x, pipe_point.y))
# get all its symbols
sn_cur.execute(r"select * from members where component_id = %s;", (comp_id,))
for symbol in sn_cur:
seq_memb += 1
sv_cur.execute(r"""INSERT INTO Member
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
[seq_memb, seq_comp] + symbol[2:])
def main(args):
global sn_cur
global sv_cur
global seq_comp
global seq_memb
global seeds
# connections
sn_conn = psycopg2.connect(dbname='solutionnet')
sn_cur = sn_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
sv_conn = sqlite3.connect(args.savefile)
sv_cur = sv_conn.cursor()
# get the sequences
seqs = sv_cur.execute('select seq from sqlite_sequence order by 1')
seq_comp, seq_memb = (int(seq[0]) for seq in seqs)
# populate seeds map
with open('seeds.csv') as levelscsv:
reader = csv.DictReader(levelscsv, skipinitialspace=True)
for row in reader:
seeds[(row['type'], int(row['output']))] = Point(int(row['x']), int(row['y']))
for sol_id in args.sol_ids:
print(f'Loading solution {sol_id}')
load_solution(sol_id, args.replace_scores)
# write sequences
sv_cur.execute(fr"""UPDATE sqlite_sequence
SET seq = CASE
WHEN name = 'Component' THEN {seq_comp}
WHEN name = 'Member' THEN {seq_memb}
END""")
sv_conn.commit()
sv_conn.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--savefile", action="store", default=r'saves/111.user')
parser.add_argument("--no-replace-scores", action="store_false", dest='replace_scores')
parser.add_argument("sol_ids", nargs='*', type=int, default=[47424])
args = parser.parse_args()
main(args)
Get the save sequences to handle themselves
#!/usr/bin/env python3
import argparse
import collections
import csv
import sqlite3
import os
import psycopg2
import psycopg2.extras
Point = collections.namedtuple('Point', ['x', 'y'])
sn_cur = None
sv_cur = None
seeds = {}
def reorder_pipe(pipe, seed):
"""Bounds: -24;30;-18;21"""
x_size = 56
y_size = 41
field = [[0]*y_size for _ in range(x_size)]
for pt in pipe:
field[pt.x][pt.y] = 1
output = [seed]
if field[seed.x][seed.y] == 0:
# bad stuff happened
print(f'Seed {seed} not found')
return output
prev = None
curr = seed
while True:
adiacence = [(curr.x+dx, curr.y+dy, field[curr.x+dx][curr.y+dy]) for dx,dy in [(1,0), (-1,0), (0,1), (0,-1)]]
s = sum(val[2] for val in adiacence)
if prev is None and s == 1:
# starting, good case
prev,curr = curr,Point(*([(x,y) for x,y,o in adiacence if o == 1][0]))
output.append(curr)
elif prev is not None and s == 2:
# continuing, good case
prev,curr = curr,Point(*([(x,y) for x,y,o in adiacence if o == 1 and not (x == prev.x and y == prev.y)][0]))
output.append(curr)
else:
# see if we've finished all the pipes
if len(output) != len(pipe):
print('Incomplete piping')
return output
def load_solution(sol_id, replace_old_score):
# get the level and the score
sn_cur.execute(r'''select internal_name, cycle_count, symbol_count, reactor_count
from solutions s, levels l
where s.level_id = l.level_id
and s.solution_id = %s;''', (sol_id,))
solution = sn_cur.fetchone()
if solution is None:
print(f'ERROR: Invalid solution id: {sol_id}')
exit()
int_level_name = solution['internal_name']
triplet = solution[1:]
# check if the level has a solution already
sv_cur.execute(r"SELECT count(*) from Level where id = ?", (int_level_name,))
count = sv_cur.fetchone()[0]
if count == 0:
# need to insert the level
print(f'Adding new solution to {int_level_name}')
sv_cur.execute(r"""INSERT INTO Level
VALUES (?, 1, 0, ?, ?, ?, ?, ?, ?)""", [int_level_name, *triplet, *triplet])
else:
# there's already a solution (possibly empty) in the file
if not replace_old_score:
print(f'There\'s already a solution to {int_level_name}, doing nothing')
return
print(f'Replacing previous solution to {int_level_name}')
sv_cur.execute(r"""UPDATE Level
SET passed = 1, mastered = 0, cycles = ?, symbols = ?, reactors = ?
WHERE id = ?""", triplet + [int_level_name])
# delete everything (Component, Member, Annotation, Pipe, UndoPtr, Undo) about the old solution
sv_cur.execute(r'SELECT rowid FROM Component WHERE level_id = ?', (int_level_name,))
comp_ids = [row[0] for row in sv_cur.fetchall()]
qm_list = ','.join('?'*len(comp_ids))
for table in ['Member', 'Annotation', 'Pipe']:
sv_cur.execute(fr'DELETE FROM {table} WHERE component_id in ({qm_list})', comp_ids)
for table in ['Component', 'UndoPtr', 'Undo']:
sv_cur.execute(fr'DELETE FROM {table} WHERE level_id = ?', (int_level_name,))
# get the reactors from db
sn_cur.execute(r""" select component_id, type, x, y
from components
where solution_id = %s
order by component_id;""", (sol_id,))
reactors = sn_cur.fetchall()
for reactor in reactors:
comp_id = reactor['component_id']
sv_cur.execute(r"""INSERT INTO Component
VALUES (NULL, ?, ?, ?, ?, NULL, 200, 255, 0)""",
[int_level_name, reactor['type'], reactor['x'], reactor['y']])
seq_comp = sv_cur.lastrowid
# get all its pipes
sn_cur.execute(r"select * from pipes where component_id = %s;", (comp_id,))
pipes = ([], [])
for pipe_point in sn_cur:
pipes[pipe_point['output_id']].append(Point(pipe_point['x'], pipe_point['y']))
for out_id, pipe in enumerate(pipes):
if not pipe:
continue
reordered_pipe = reorder_pipe(pipe, seeds[(reactor['type'], out_id)])
for pipe_point in reordered_pipe:
sv_cur.execute(r"INSERT INTO Pipe VALUES (?, ?, ?, ?)", (seq_comp, out_id, pipe_point.x, pipe_point.y))
# get all its symbols
sn_cur.execute(r"select * from members where component_id = %s;", (comp_id,))
for symbol in sn_cur:
sv_cur.execute(r"""INSERT INTO Member
VALUES (NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
[seq_comp] + symbol[2:])
def main():
global sn_cur
global sv_cur
global seeds
# connections
sn_conn = psycopg2.connect(dbname='solutionnet')
sn_cur = sn_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
sv_conn = sqlite3.connect(args.savefile)
sv_cur = sv_conn.cursor()
# populate seeds map
with open('seeds.csv') as levelscsv:
reader = csv.DictReader(levelscsv, skipinitialspace=True)
for row in reader:
seeds[(row['type'], int(row['output']))] = Point(int(row['x']), int(row['y']))
for sol_id in args.sol_ids:
print(f'Loading solution {sol_id}')
load_solution(sol_id, args.replace_scores)
sv_conn.commit()
sv_conn.close()
if __name__ == '__main__':
save_path = os.path.dirname(os.path.realpath(__file__)) + r'/../saves/12345ieee/111.user'
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--savefile", action="store", default=save_path)
parser.add_argument("--no-replace-scores", action="store_false", dest='replace_scores')
parser.add_argument("sol_ids", nargs='*', type=int, default=[47424])
args = parser.parse_args()
main()
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility functions used for integrating Metrics API into load tests pipelines.
Metrics are send to BigQuery in following format:
test_id | submit_timestamp | metric_type | value
The 'test_id' is common for all metrics for one run.
Currently it is possible to have following metrics types:
* runtime
* total_bytes_count
"""
# pytype: skip-file
import json
import logging
import time
import uuid
from typing import Any
from typing import List
from typing import Mapping
from typing import Optional
from typing import Union
import requests
from requests.auth import HTTPBasicAuth
import apache_beam as beam
from apache_beam.metrics import Metrics
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils.timestamp import Timestamp
try:
from google.cloud import bigquery # type: ignore
from google.cloud.bigquery.schema import SchemaField
from google.cloud.exceptions import NotFound
except ImportError:
bigquery = None
SchemaField = None
NotFound = None
RUNTIME_METRIC = 'runtime'
COUNTER_LABEL = 'total_bytes_count'
ID_LABEL = 'test_id'
SUBMIT_TIMESTAMP_LABEL = 'timestamp'
METRICS_TYPE_LABEL = 'metric'
VALUE_LABEL = 'value'
SCHEMA = [{
'name': ID_LABEL, 'field_type': 'STRING', 'mode': 'REQUIRED'
},
{
'name': SUBMIT_TIMESTAMP_LABEL,
'field_type': 'TIMESTAMP',
'mode': 'REQUIRED'
},
{
'name': METRICS_TYPE_LABEL,
'field_type': 'STRING',
'mode': 'REQUIRED'
}, {
'name': VALUE_LABEL, 'field_type': 'FLOAT', 'mode': 'REQUIRED'
}]
_LOGGER = logging.getLogger(__name__)
def parse_step(step_name):
"""Replaces white spaces and removes 'Step:' label
Args:
step_name(str): step name passed in metric ParDo
Returns:
lower case step name without namespace and step label
"""
return step_name.lower().replace(' ', '_').strip('step:_')
def split_metrics_by_namespace_and_name(metrics, namespace, name):
"""Splits metrics list namespace and name.
Args:
metrics: list of metrics from pipeline result
namespace(str): filter metrics by namespace
name(str): filter metrics by name
Returns:
two lists - one of metrics which are matching filters
and second of not matching
"""
matching_metrics = []
not_matching_metrics = []
for dist in metrics:
if dist.key.metric.namespace == namespace\
and dist.key.metric.name == name:
matching_metrics.append(dist)
else:
not_matching_metrics.append(dist)
return matching_metrics, not_matching_metrics
def get_generic_distributions(generic_dists, metric_id):
"""Creates flatten list of distributions per its value type.
A generic distribution is the one which is not processed but saved in
the most raw version.
Args:
generic_dists: list of distributions to be saved
metric_id(uuid): id of the current test run
Returns:
list of dictionaries made from :class:`DistributionMetric`
"""
return sum((
get_all_distributions_by_type(dist, metric_id) for dist in generic_dists),
[])
def get_all_distributions_by_type(dist, metric_id):
"""Creates new list of objects with type of each distribution
metric value.
Args:
dist(object): DistributionMetric object to be parsed
metric_id(uuid): id of the current test run
Returns:
list of :class:`DistributionMetric` objects
"""
submit_timestamp = time.time()
dist_types = ['count', 'max', 'min', 'sum']
distribution_dicts = []
for dist_type in dist_types:
try:
distribution_dicts.append(
get_distribution_dict(dist_type, submit_timestamp, dist, metric_id))
except ValueError:
# Ignore metrics with 'None' values.
continue
return distribution_dicts
def get_distribution_dict(metric_type, submit_timestamp, dist, metric_id):
"""Function creates :class:`DistributionMetric`
Args:
metric_type(str): type of value from distribution metric which will
be saved (ex. max, min, mean, sum)
submit_timestamp: timestamp when metric is saved
dist(object) distribution object from pipeline result
metric_id(uuid): id of the current test run
Returns:
dictionary prepared for saving according to schema
"""
return DistributionMetric(dist, submit_timestamp, metric_id,
metric_type).as_dict()
class MetricsReader(object):
"""
A :class:`MetricsReader` retrieves metrics from pipeline result,
prepares it for publishers and setup publishers.
"""
publishers = [] # type: List[Any]
def __init__(
self,
project_name=None,
bq_table=None,
bq_dataset=None,
publish_to_bq=False,
influxdb_options=None, # type: Optional[InfluxDBMetricsPublisherOptions]
namespace=None,
filters=None):
"""Initializes :class:`MetricsReader` .
Args:
project_name (str): project with BigQuery where metrics will be saved
bq_table (str): BigQuery table where metrics will be saved
bq_dataset (str): BigQuery dataset where metrics will be saved
namespace (str): Namespace of the metrics
filters: MetricFilter to query only filtered metrics
"""
self._namespace = namespace
self.publishers.append(ConsoleMetricsPublisher())
check = project_name and bq_table and bq_dataset and publish_to_bq
if check:
bq_publisher = BigQueryMetricsPublisher(
project_name, bq_table, bq_dataset)
self.publishers.append(bq_publisher)
if influxdb_options and influxdb_options.validate():
self.publishers.append(InfluxDBMetricsPublisher(influxdb_options))
else:
_LOGGER.info(
'Missing InfluxDB options. Metrics will not be published to '
'InfluxDB')
self.filters = filters
def publish_metrics(self, result, extra_metrics: dict):
metric_id = uuid.uuid4().hex
metrics = result.metrics().query(self.filters)
# Metrics from pipeline result are stored in map with keys: 'gauges',
# 'distributions' and 'counters'.
# Under each key there is list of objects of each metric type. It is
# required to prepare metrics for publishing purposes. Expected is to have
# a list of dictionaries matching the schema.
insert_dicts = self._prepare_all_metrics(metrics, metric_id)
insert_dicts += self._prepare_extra_metrics(extra_metrics, metric_id)
if len(insert_dicts) > 0:
for publisher in self.publishers:
publisher.publish(insert_dicts)
def _prepare_extra_metrics(self, extra_metrics: dict, metric_id: str):
ts = time.time()
return [
Metric(ts, metric_id, v, label=k).as_dict() for k,
v in extra_metrics.items()
]
def publish_values(self, labeled_values):
"""The method to publish simple labeled values.
Args:
labeled_values (List[Tuple(str, int)]): list of (label, value)
"""
metric_dicts = [
Metric(time.time(), uuid.uuid4().hex, value, label=label).as_dict()
for label,
value in labeled_values
]
for publisher in self.publishers:
publisher.publish(metric_dicts)
def _prepare_all_metrics(self, metrics, metric_id):
insert_rows = self._get_counters(metrics['counters'], metric_id)
insert_rows += self._get_distributions(metrics['distributions'], metric_id)
return insert_rows
def _get_counters(self, counters, metric_id):
submit_timestamp = time.time()
return [
CounterMetric(counter, submit_timestamp, metric_id).as_dict()
for counter in counters
]
def _get_distributions(self, distributions, metric_id):
rows = []
matching_namsespace, not_matching_namespace = \
split_metrics_by_namespace_and_name(distributions, self._namespace,
RUNTIME_METRIC)
if len(matching_namsespace) > 0:
runtime_metric = RuntimeMetric(matching_namsespace, metric_id)
rows.append(runtime_metric.as_dict())
if len(not_matching_namespace) > 0:
rows += get_generic_distributions(not_matching_namespace, metric_id)
return rows
class Metric(object):
"""Metric base class in ready-to-save format."""
def __init__(
self, submit_timestamp, metric_id, value, metric=None, label=None):
"""Initializes :class:`Metric`
Args:
metric (object): object of metric result
submit_timestamp (float): date-time of saving metric to database
metric_id (uuid): unique id to identify test run
value: value of metric
label: custom metric name to be saved in database
"""
self.submit_timestamp = submit_timestamp
self.metric_id = metric_id
self.label = label or metric.key.metric.namespace + \
'_' + parse_step(metric.key.step) + \
'_' + metric.key.metric.name
self.value = value
def as_dict(self):
return {
SUBMIT_TIMESTAMP_LABEL: self.submit_timestamp,
ID_LABEL: self.metric_id,
VALUE_LABEL: self.value,
METRICS_TYPE_LABEL: self.label
}
class CounterMetric(Metric):
"""The Counter Metric in ready-to-publish format.
Args:
counter_metric (object): counter metric object from MetricResult
submit_timestamp (float): date-time of saving metric to database
metric_id (uuid): unique id to identify test run
"""
def __init__(self, counter_metric, submit_timestamp, metric_id):
value = counter_metric.result
super(CounterMetric,
self).__init__(submit_timestamp, metric_id, value, counter_metric)
class DistributionMetric(Metric):
"""The Distribution Metric in ready-to-publish format.
Args:
dist_metric (object): distribution metric object from MetricResult
submit_timestamp (float): date-time of saving metric to database
metric_id (uuid): unique id to identify test run
"""
def __init__(self, dist_metric, submit_timestamp, metric_id, metric_type):
custom_label = dist_metric.key.metric.namespace + \
'_' + parse_step(dist_metric.key.step) + \
'_' + metric_type + \
'_' + dist_metric.key.metric.name
value = getattr(dist_metric.result, metric_type)
if value is None:
msg = '%s: the result is expected to be an integer, ' \
'not None.' % custom_label
_LOGGER.debug(msg)
raise ValueError(msg)
super(DistributionMetric, self) \
.__init__(submit_timestamp, metric_id, value, dist_metric, custom_label)
class RuntimeMetric(Metric):
"""The Distribution Metric in ready-to-publish format.
Args:
runtime_list: list of distributions metrics from MetricResult
with runtime name
metric_id(uuid): unique id to identify test run
"""
def __init__(self, runtime_list, metric_id):
value = self._prepare_runtime_metrics(runtime_list)
submit_timestamp = time.time()
# Label does not include step name, because it is one value calculated
# out of many steps
label = runtime_list[0].key.metric.namespace + \
'_' + RUNTIME_METRIC
super(RuntimeMetric,
self).__init__(submit_timestamp, metric_id, value, None, label)
def _prepare_runtime_metrics(self, distributions):
min_values = []
max_values = []
for dist in distributions:
min_values.append(dist.result.min)
max_values.append(dist.result.max)
# finding real start
min_value = min(min_values)
# finding real end
max_value = max(max_values)
runtime_in_s = float(max_value - min_value)
return runtime_in_s
class ConsoleMetricsPublisher(object):
"""A :class:`ConsoleMetricsPublisher` publishes collected metrics
to console output."""
def publish(self, results):
if len(results) > 0:
log = "Load test results for test: %s and timestamp: %s:" \
% (results[0][ID_LABEL], results[0][SUBMIT_TIMESTAMP_LABEL])
_LOGGER.info(log)
for result in results:
log = "Metric: %s Value: %d" \
% (result[METRICS_TYPE_LABEL], result[VALUE_LABEL])
_LOGGER.info(log)
else:
_LOGGER.info("No test results were collected.")
class BigQueryMetricsPublisher(object):
"""A :class:`BigQueryMetricsPublisher` publishes collected metrics
to BigQuery output."""
def __init__(self, project_name, table, dataset):
self.bq = BigQueryClient(project_name, table, dataset)
def publish(self, results):
outputs = self.bq.save(results)
if len(outputs) > 0:
for output in outputs:
if output['errors']:
_LOGGER.error(output)
raise ValueError(
'Unable save rows in BigQuery: {}'.format(output['errors']))
class BigQueryClient(object):
"""A :class:`BigQueryClient` publishes collected metrics to
BigQuery output."""
def __init__(self, project_name, table, dataset):
self._namespace = table
self._client = bigquery.Client(project=project_name)
self._schema_names = self._get_schema_names()
schema = self._prepare_schema()
self._get_or_create_table(schema, dataset)
def _get_schema_names(self):
return [schema['name'] for schema in SCHEMA]
def _prepare_schema(self):
return [SchemaField(**row) for row in SCHEMA]
def _get_or_create_table(self, bq_schemas, dataset):
if self._namespace == '':
raise ValueError('Namespace cannot be empty.')
dataset = self._get_dataset(dataset)
table_ref = dataset.table(self._namespace)
try:
self._bq_table = self._client.get_table(table_ref)
except NotFound:
table = bigquery.Table(table_ref, schema=bq_schemas)
self._bq_table = self._client.create_table(table)
def _get_dataset(self, dataset_name):
bq_dataset_ref = self._client.dataset(dataset_name)
try:
bq_dataset = self._client.get_dataset(bq_dataset_ref)
except NotFound:
raise ValueError(
'Dataset {} does not exist in your project. '
'You have to create table first.'.format(dataset_name))
return bq_dataset
def save(self, results):
return self._client.insert_rows(self._bq_table, results)
class InfluxDBMetricsPublisherOptions(object):
def __init__(
self,
measurement, # type: str
db_name, # type: str
hostname, # type: str
user=None, # type: Optional[str]
password=None # type: Optional[str]
):
self.measurement = measurement
self.db_name = db_name
self.hostname = hostname
self.user = user
self.password = password
def validate(self):
# type: () -> bool
return bool(self.measurement) and bool(self.db_name)
def http_auth_enabled(self):
# type: () -> bool
return self.user is not None and self.password is not None
class InfluxDBMetricsPublisher(object):
"""Publishes collected metrics to InfluxDB database."""
def __init__(
self,
options # type: InfluxDBMetricsPublisherOptions
):
self.options = options
def publish(self, results):
# type: (List[Mapping[str, Union[float, str, int]]]) -> None
url = '{}/write'.format(self.options.hostname)
payload = self._build_payload(results)
query_str = {'db': self.options.db_name, 'precision': 's'}
auth = HTTPBasicAuth(self.options.user, self.options.password) if \
self.options.http_auth_enabled() else None
try:
response = requests.post(url, params=query_str, data=payload, auth=auth)
except requests.exceptions.RequestException as e:
_LOGGER.warning('Failed to publish metrics to InfluxDB: ' + str(e))
else:
if response.status_code != 204:
content = json.loads(response.content)
_LOGGER.warning(
'Failed to publish metrics to InfluxDB. Received status code %s '
'with an error message: %s' %
(response.status_code, content['error']))
def _build_payload(self, results):
# type: (List[Mapping[str, Union[float, str, int]]]) -> str
def build_kv(mapping, key):
return '{}={}'.format(key, mapping[key])
points = []
for result in results:
comma_separated = [
self.options.measurement,
build_kv(result, METRICS_TYPE_LABEL),
build_kv(result, ID_LABEL),
]
point = ','.join(comma_separated) + ' ' + build_kv(result, VALUE_LABEL) \
+ ' ' + str(int(result[SUBMIT_TIMESTAMP_LABEL]))
points.append(point)
return '\n'.join(points)
class MeasureTime(beam.DoFn):
"""A distribution metric prepared to be added to pipeline as ParDo
to measure runtime."""
def __init__(self, namespace):
"""Initializes :class:`MeasureTime`.
namespace(str): namespace of metric
"""
self.namespace = namespace
self.runtime = Metrics.distribution(self.namespace, RUNTIME_METRIC)
def start_bundle(self):
self.runtime.update(time.time())
def finish_bundle(self):
self.runtime.update(time.time())
def process(self, element):
yield element
class MeasureBytes(beam.DoFn):
"""Metric to measure how many bytes was observed in pipeline."""
LABEL = 'total_bytes'
def __init__(self, namespace, extractor=None):
"""Initializes :class:`MeasureBytes`.
Args:
namespace(str): metric namespace
extractor: function to extract elements to be count
"""
self.namespace = namespace
self.counter = Metrics.counter(self.namespace, self.LABEL)
self.extractor = extractor if extractor else lambda x: (yield x)
def process(self, element, *args):
for value in self.extractor(element, *args):
self.counter.inc(len(value))
yield element
class CountMessages(beam.DoFn):
LABEL = 'total_messages'
def __init__(self, namespace):
self.namespace = namespace
self.counter = Metrics.counter(self.namespace, self.LABEL)
def process(self, element):
self.counter.inc(1)
yield element
class MeasureLatency(beam.DoFn):
"""A distribution metric which captures the latency based on the timestamps
of the processed elements."""
LABEL = 'latency'
def __init__(self, namespace):
"""Initializes :class:`MeasureLatency`.
namespace(str): namespace of metric
"""
self.namespace = namespace
self.latency_ms = Metrics.distribution(self.namespace, self.LABEL)
self.time_fn = time.time
def process(self, element, timestamp=beam.DoFn.TimestampParam):
self.latency_ms.update(
int(self.time_fn() * 1000) - (timestamp.micros // 1000))
yield element
class AssignTimestamps(beam.DoFn):
"""DoFn to assigned timestamps to elements."""
def __init__(self):
# Avoid having to use save_main_session
self.time_fn = time.time
self.timestamp_val_fn = TimestampedValue
self.timestamp_fn = Timestamp
def process(self, element):
yield self.timestamp_val_fn(
element, self.timestamp_fn(micros=int(self.time_fn() * 1000000)))
Revert type: ignore caused by PR 13617
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility functions used for integrating Metrics API into load tests pipelines.
Metrics are send to BigQuery in following format:
test_id | submit_timestamp | metric_type | value
The 'test_id' is common for all metrics for one run.
Currently it is possible to have following metrics types:
* runtime
* total_bytes_count
"""
# pytype: skip-file
import json
import logging
import time
import uuid
from typing import Any
from typing import List
from typing import Mapping
from typing import Optional
from typing import Union
import requests
from requests.auth import HTTPBasicAuth
import apache_beam as beam
from apache_beam.metrics import Metrics
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils.timestamp import Timestamp
try:
from google.cloud import bigquery
from google.cloud.bigquery.schema import SchemaField
from google.cloud.exceptions import NotFound
except ImportError:
bigquery = None
SchemaField = None
NotFound = None
RUNTIME_METRIC = 'runtime'
COUNTER_LABEL = 'total_bytes_count'
ID_LABEL = 'test_id'
SUBMIT_TIMESTAMP_LABEL = 'timestamp'
METRICS_TYPE_LABEL = 'metric'
VALUE_LABEL = 'value'
SCHEMA = [{
'name': ID_LABEL, 'field_type': 'STRING', 'mode': 'REQUIRED'
},
{
'name': SUBMIT_TIMESTAMP_LABEL,
'field_type': 'TIMESTAMP',
'mode': 'REQUIRED'
},
{
'name': METRICS_TYPE_LABEL,
'field_type': 'STRING',
'mode': 'REQUIRED'
}, {
'name': VALUE_LABEL, 'field_type': 'FLOAT', 'mode': 'REQUIRED'
}]
_LOGGER = logging.getLogger(__name__)
def parse_step(step_name):
"""Replaces white spaces and removes 'Step:' label
Args:
step_name(str): step name passed in metric ParDo
Returns:
lower case step name without namespace and step label
"""
return step_name.lower().replace(' ', '_').strip('step:_')
def split_metrics_by_namespace_and_name(metrics, namespace, name):
"""Splits metrics list namespace and name.
Args:
metrics: list of metrics from pipeline result
namespace(str): filter metrics by namespace
name(str): filter metrics by name
Returns:
two lists - one of metrics which are matching filters
and second of not matching
"""
matching_metrics = []
not_matching_metrics = []
for dist in metrics:
if dist.key.metric.namespace == namespace\
and dist.key.metric.name == name:
matching_metrics.append(dist)
else:
not_matching_metrics.append(dist)
return matching_metrics, not_matching_metrics
def get_generic_distributions(generic_dists, metric_id):
"""Creates flatten list of distributions per its value type.
A generic distribution is the one which is not processed but saved in
the most raw version.
Args:
generic_dists: list of distributions to be saved
metric_id(uuid): id of the current test run
Returns:
list of dictionaries made from :class:`DistributionMetric`
"""
return sum((
get_all_distributions_by_type(dist, metric_id) for dist in generic_dists),
[])
def get_all_distributions_by_type(dist, metric_id):
"""Creates new list of objects with type of each distribution
metric value.
Args:
dist(object): DistributionMetric object to be parsed
metric_id(uuid): id of the current test run
Returns:
list of :class:`DistributionMetric` objects
"""
submit_timestamp = time.time()
dist_types = ['count', 'max', 'min', 'sum']
distribution_dicts = []
for dist_type in dist_types:
try:
distribution_dicts.append(
get_distribution_dict(dist_type, submit_timestamp, dist, metric_id))
except ValueError:
# Ignore metrics with 'None' values.
continue
return distribution_dicts
def get_distribution_dict(metric_type, submit_timestamp, dist, metric_id):
"""Function creates :class:`DistributionMetric`
Args:
metric_type(str): type of value from distribution metric which will
be saved (ex. max, min, mean, sum)
submit_timestamp: timestamp when metric is saved
dist(object) distribution object from pipeline result
metric_id(uuid): id of the current test run
Returns:
dictionary prepared for saving according to schema
"""
return DistributionMetric(dist, submit_timestamp, metric_id,
metric_type).as_dict()
class MetricsReader(object):
"""
A :class:`MetricsReader` retrieves metrics from pipeline result,
prepares it for publishers and setup publishers.
"""
publishers = [] # type: List[Any]
def __init__(
self,
project_name=None,
bq_table=None,
bq_dataset=None,
publish_to_bq=False,
influxdb_options=None, # type: Optional[InfluxDBMetricsPublisherOptions]
namespace=None,
filters=None):
"""Initializes :class:`MetricsReader` .
Args:
project_name (str): project with BigQuery where metrics will be saved
bq_table (str): BigQuery table where metrics will be saved
bq_dataset (str): BigQuery dataset where metrics will be saved
namespace (str): Namespace of the metrics
filters: MetricFilter to query only filtered metrics
"""
self._namespace = namespace
self.publishers.append(ConsoleMetricsPublisher())
check = project_name and bq_table and bq_dataset and publish_to_bq
if check:
bq_publisher = BigQueryMetricsPublisher(
project_name, bq_table, bq_dataset)
self.publishers.append(bq_publisher)
if influxdb_options and influxdb_options.validate():
self.publishers.append(InfluxDBMetricsPublisher(influxdb_options))
else:
_LOGGER.info(
'Missing InfluxDB options. Metrics will not be published to '
'InfluxDB')
self.filters = filters
def publish_metrics(self, result, extra_metrics: dict):
metric_id = uuid.uuid4().hex
metrics = result.metrics().query(self.filters)
# Metrics from pipeline result are stored in map with keys: 'gauges',
# 'distributions' and 'counters'.
# Under each key there is list of objects of each metric type. It is
# required to prepare metrics for publishing purposes. Expected is to have
# a list of dictionaries matching the schema.
insert_dicts = self._prepare_all_metrics(metrics, metric_id)
insert_dicts += self._prepare_extra_metrics(extra_metrics, metric_id)
if len(insert_dicts) > 0:
for publisher in self.publishers:
publisher.publish(insert_dicts)
def _prepare_extra_metrics(self, extra_metrics: dict, metric_id: str):
ts = time.time()
return [
Metric(ts, metric_id, v, label=k).as_dict() for k,
v in extra_metrics.items()
]
def publish_values(self, labeled_values):
"""The method to publish simple labeled values.
Args:
labeled_values (List[Tuple(str, int)]): list of (label, value)
"""
metric_dicts = [
Metric(time.time(), uuid.uuid4().hex, value, label=label).as_dict()
for label,
value in labeled_values
]
for publisher in self.publishers:
publisher.publish(metric_dicts)
def _prepare_all_metrics(self, metrics, metric_id):
insert_rows = self._get_counters(metrics['counters'], metric_id)
insert_rows += self._get_distributions(metrics['distributions'], metric_id)
return insert_rows
def _get_counters(self, counters, metric_id):
submit_timestamp = time.time()
return [
CounterMetric(counter, submit_timestamp, metric_id).as_dict()
for counter in counters
]
def _get_distributions(self, distributions, metric_id):
rows = []
matching_namsespace, not_matching_namespace = \
split_metrics_by_namespace_and_name(distributions, self._namespace,
RUNTIME_METRIC)
if len(matching_namsespace) > 0:
runtime_metric = RuntimeMetric(matching_namsespace, metric_id)
rows.append(runtime_metric.as_dict())
if len(not_matching_namespace) > 0:
rows += get_generic_distributions(not_matching_namespace, metric_id)
return rows
class Metric(object):
"""Metric base class in ready-to-save format."""
def __init__(
self, submit_timestamp, metric_id, value, metric=None, label=None):
"""Initializes :class:`Metric`
Args:
metric (object): object of metric result
submit_timestamp (float): date-time of saving metric to database
metric_id (uuid): unique id to identify test run
value: value of metric
label: custom metric name to be saved in database
"""
self.submit_timestamp = submit_timestamp
self.metric_id = metric_id
self.label = label or metric.key.metric.namespace + \
'_' + parse_step(metric.key.step) + \
'_' + metric.key.metric.name
self.value = value
def as_dict(self):
return {
SUBMIT_TIMESTAMP_LABEL: self.submit_timestamp,
ID_LABEL: self.metric_id,
VALUE_LABEL: self.value,
METRICS_TYPE_LABEL: self.label
}
class CounterMetric(Metric):
"""The Counter Metric in ready-to-publish format.
Args:
counter_metric (object): counter metric object from MetricResult
submit_timestamp (float): date-time of saving metric to database
metric_id (uuid): unique id to identify test run
"""
def __init__(self, counter_metric, submit_timestamp, metric_id):
value = counter_metric.result
super(CounterMetric,
self).__init__(submit_timestamp, metric_id, value, counter_metric)
class DistributionMetric(Metric):
"""The Distribution Metric in ready-to-publish format.
Args:
dist_metric (object): distribution metric object from MetricResult
submit_timestamp (float): date-time of saving metric to database
metric_id (uuid): unique id to identify test run
"""
def __init__(self, dist_metric, submit_timestamp, metric_id, metric_type):
custom_label = dist_metric.key.metric.namespace + \
'_' + parse_step(dist_metric.key.step) + \
'_' + metric_type + \
'_' + dist_metric.key.metric.name
value = getattr(dist_metric.result, metric_type)
if value is None:
msg = '%s: the result is expected to be an integer, ' \
'not None.' % custom_label
_LOGGER.debug(msg)
raise ValueError(msg)
super(DistributionMetric, self) \
.__init__(submit_timestamp, metric_id, value, dist_metric, custom_label)
class RuntimeMetric(Metric):
"""The Distribution Metric in ready-to-publish format.
Args:
runtime_list: list of distributions metrics from MetricResult
with runtime name
metric_id(uuid): unique id to identify test run
"""
def __init__(self, runtime_list, metric_id):
value = self._prepare_runtime_metrics(runtime_list)
submit_timestamp = time.time()
# Label does not include step name, because it is one value calculated
# out of many steps
label = runtime_list[0].key.metric.namespace + \
'_' + RUNTIME_METRIC
super(RuntimeMetric,
self).__init__(submit_timestamp, metric_id, value, None, label)
def _prepare_runtime_metrics(self, distributions):
min_values = []
max_values = []
for dist in distributions:
min_values.append(dist.result.min)
max_values.append(dist.result.max)
# finding real start
min_value = min(min_values)
# finding real end
max_value = max(max_values)
runtime_in_s = float(max_value - min_value)
return runtime_in_s
class ConsoleMetricsPublisher(object):
"""A :class:`ConsoleMetricsPublisher` publishes collected metrics
to console output."""
def publish(self, results):
if len(results) > 0:
log = "Load test results for test: %s and timestamp: %s:" \
% (results[0][ID_LABEL], results[0][SUBMIT_TIMESTAMP_LABEL])
_LOGGER.info(log)
for result in results:
log = "Metric: %s Value: %d" \
% (result[METRICS_TYPE_LABEL], result[VALUE_LABEL])
_LOGGER.info(log)
else:
_LOGGER.info("No test results were collected.")
class BigQueryMetricsPublisher(object):
"""A :class:`BigQueryMetricsPublisher` publishes collected metrics
to BigQuery output."""
def __init__(self, project_name, table, dataset):
self.bq = BigQueryClient(project_name, table, dataset)
def publish(self, results):
outputs = self.bq.save(results)
if len(outputs) > 0:
for output in outputs:
if output['errors']:
_LOGGER.error(output)
raise ValueError(
'Unable save rows in BigQuery: {}'.format(output['errors']))
class BigQueryClient(object):
"""A :class:`BigQueryClient` publishes collected metrics to
BigQuery output."""
def __init__(self, project_name, table, dataset):
self._namespace = table
self._client = bigquery.Client(project=project_name)
self._schema_names = self._get_schema_names()
schema = self._prepare_schema()
self._get_or_create_table(schema, dataset)
def _get_schema_names(self):
return [schema['name'] for schema in SCHEMA]
def _prepare_schema(self):
return [SchemaField(**row) for row in SCHEMA]
def _get_or_create_table(self, bq_schemas, dataset):
if self._namespace == '':
raise ValueError('Namespace cannot be empty.')
dataset = self._get_dataset(dataset)
table_ref = dataset.table(self._namespace)
try:
self._bq_table = self._client.get_table(table_ref)
except NotFound:
table = bigquery.Table(table_ref, schema=bq_schemas)
self._bq_table = self._client.create_table(table)
def _get_dataset(self, dataset_name):
bq_dataset_ref = self._client.dataset(dataset_name)
try:
bq_dataset = self._client.get_dataset(bq_dataset_ref)
except NotFound:
raise ValueError(
'Dataset {} does not exist in your project. '
'You have to create table first.'.format(dataset_name))
return bq_dataset
def save(self, results):
return self._client.insert_rows(self._bq_table, results)
class InfluxDBMetricsPublisherOptions(object):
def __init__(
self,
measurement, # type: str
db_name, # type: str
hostname, # type: str
user=None, # type: Optional[str]
password=None # type: Optional[str]
):
self.measurement = measurement
self.db_name = db_name
self.hostname = hostname
self.user = user
self.password = password
def validate(self):
# type: () -> bool
return bool(self.measurement) and bool(self.db_name)
def http_auth_enabled(self):
# type: () -> bool
return self.user is not None and self.password is not None
class InfluxDBMetricsPublisher(object):
"""Publishes collected metrics to InfluxDB database."""
def __init__(
self,
options # type: InfluxDBMetricsPublisherOptions
):
self.options = options
def publish(self, results):
# type: (List[Mapping[str, Union[float, str, int]]]) -> None
url = '{}/write'.format(self.options.hostname)
payload = self._build_payload(results)
query_str = {'db': self.options.db_name, 'precision': 's'}
auth = HTTPBasicAuth(self.options.user, self.options.password) if \
self.options.http_auth_enabled() else None
try:
response = requests.post(url, params=query_str, data=payload, auth=auth)
except requests.exceptions.RequestException as e:
_LOGGER.warning('Failed to publish metrics to InfluxDB: ' + str(e))
else:
if response.status_code != 204:
content = json.loads(response.content)
_LOGGER.warning(
'Failed to publish metrics to InfluxDB. Received status code %s '
'with an error message: %s' %
(response.status_code, content['error']))
def _build_payload(self, results):
# type: (List[Mapping[str, Union[float, str, int]]]) -> str
def build_kv(mapping, key):
return '{}={}'.format(key, mapping[key])
points = []
for result in results:
comma_separated = [
self.options.measurement,
build_kv(result, METRICS_TYPE_LABEL),
build_kv(result, ID_LABEL),
]
point = ','.join(comma_separated) + ' ' + build_kv(result, VALUE_LABEL) \
+ ' ' + str(int(result[SUBMIT_TIMESTAMP_LABEL]))
points.append(point)
return '\n'.join(points)
class MeasureTime(beam.DoFn):
"""A distribution metric prepared to be added to pipeline as ParDo
to measure runtime."""
def __init__(self, namespace):
"""Initializes :class:`MeasureTime`.
namespace(str): namespace of metric
"""
self.namespace = namespace
self.runtime = Metrics.distribution(self.namespace, RUNTIME_METRIC)
def start_bundle(self):
self.runtime.update(time.time())
def finish_bundle(self):
self.runtime.update(time.time())
def process(self, element):
yield element
class MeasureBytes(beam.DoFn):
"""Metric to measure how many bytes was observed in pipeline."""
LABEL = 'total_bytes'
def __init__(self, namespace, extractor=None):
"""Initializes :class:`MeasureBytes`.
Args:
namespace(str): metric namespace
extractor: function to extract elements to be count
"""
self.namespace = namespace
self.counter = Metrics.counter(self.namespace, self.LABEL)
self.extractor = extractor if extractor else lambda x: (yield x)
def process(self, element, *args):
for value in self.extractor(element, *args):
self.counter.inc(len(value))
yield element
class CountMessages(beam.DoFn):
LABEL = 'total_messages'
def __init__(self, namespace):
self.namespace = namespace
self.counter = Metrics.counter(self.namespace, self.LABEL)
def process(self, element):
self.counter.inc(1)
yield element
class MeasureLatency(beam.DoFn):
"""A distribution metric which captures the latency based on the timestamps
of the processed elements."""
LABEL = 'latency'
def __init__(self, namespace):
"""Initializes :class:`MeasureLatency`.
namespace(str): namespace of metric
"""
self.namespace = namespace
self.latency_ms = Metrics.distribution(self.namespace, self.LABEL)
self.time_fn = time.time
def process(self, element, timestamp=beam.DoFn.TimestampParam):
self.latency_ms.update(
int(self.time_fn() * 1000) - (timestamp.micros // 1000))
yield element
class AssignTimestamps(beam.DoFn):
"""DoFn to assigned timestamps to elements."""
def __init__(self):
# Avoid having to use save_main_session
self.time_fn = time.time
self.timestamp_val_fn = TimestampedValue
self.timestamp_fn = Timestamp
def process(self, element):
yield self.timestamp_val_fn(
element, self.timestamp_fn(micros=int(self.time_fn() * 1000000)))
|
from bson import json_util
import json
import logging
import pymongo
from tqdm import tqdm
import settings
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db.models import Q
from guardian.shortcuts import assign_perm
from reference_data.models import GENOME_VERSION_GRCh37
from seqr.views.apis import phenotips_api
from seqr.views.apis.phenotips_api import _update_individual_phenotips_data
from xbrowse_server.base.models import \
Project, \
Family, \
FamilyGroup, \
Individual, \
VariantNote, \
ProjectTag, \
VariantTag, \
ProjectCollaborator, \
ReferencePopulation
from seqr.models import \
Project as SeqrProject, \
Family as SeqrFamily, \
Individual as SeqrIndividual, \
VariantTagType as SeqrVariantTagType, \
VariantTag as SeqrVariantTag, \
VariantNote as SeqrVariantNote, \
Sample as SeqrSample, \
Dataset as SeqrDataset, \
LocusList, \
CAN_EDIT, CAN_VIEW, ModelWithGUID
from xbrowse_server.mall import get_datastore, get_annotator
logger = logging.getLogger(__name__)
# switching to python3.6 will make this unnecessary as built-in python dictionaries will be ordered
from collections import OrderedDict, defaultdict
class OrderedDefaultDict(OrderedDict, defaultdict):
def __init__(self, default_factory=None, *args, **kwargs):
super(OrderedDefaultDict, self).__init__(*args, **kwargs)
self.default_factory = default_factory
class Command(BaseCommand):
help = 'Transfer projects to the new seqr schema'
def add_arguments(self, parser):
parser.add_argument('--reset-all-models', help='This flag causes all records to be cleared from the seqr schema\'s Project, Family, and Individual models before transferring data', action='store_true')
parser.add_argument('--dont-connect-to-phenotips', help='dont retrieve phenotips internal id and latest data', action='store_true')
parser.add_argument('-w', '--wgs-projects', help='text file that lists WGS project-ids - one per line')
parser.add_argument('project_id', nargs="*", help='Project(s) to transfer. If not specified, defaults to all projects.')
def handle(self, *args, **options):
"""transfer project"""
reset_all_models = options['reset_all_models']
connect_to_phenotips = not options['dont_connect_to_phenotips']
project_ids_to_process = options['project_id']
counters = OrderedDefaultDict(int)
if reset_all_models:
print("Dropping all records from SeqrProject, SeqrFamily, SeqrIndividual")
SeqrIndividual.objects.all().delete()
SeqrFamily.objects.all().delete()
SeqrProject.objects.all().delete()
# reset models that'll be regenerated
if not project_ids_to_process:
SeqrVariantTagType.objects.all().delete()
SeqrVariantTag.objects.all().delete()
SeqrVariantNote.objects.all().delete()
SeqrSample.objects.all().delete()
SeqrDataset.objects.all().delete()
if project_ids_to_process:
projects = Project.objects.filter(project_id__in=project_ids_to_process)
logging.info("Processing %s projects" % len(projects))
else:
projects = Project.objects.filter(
~Q(project_id__contains="DEPRECATED") &
~Q(project_name__contains="DEPRECATED") &
~Q(project_id__istartswith="temp") &
~Q(project_id__istartswith="test_")
)
logging.info("Processing all %s projects" % len(projects))
wgs_project_ids = {}
if options['wgs_projects']:
with open(options['wgs_projects']) as f:
wgs_project_ids = {line.strip().lower() for line in f if len(line.strip()) > 0}
updated_seqr_project_guids = set()
updated_seqr_family_guids = set()
updated_seqr_individual_guids = set()
for source_project in tqdm(projects, unit=" projects"):
counters['source_projects'] += 1
print("Project: " + source_project.project_id)
# compute sample_type for this project
project_names = ("%s|%s" % (source_project.project_id, source_project.project_name)).lower()
if "wgs" in project_names or "genome" in source_project.project_id.lower() or source_project.project_id.lower() in wgs_project_ids:
sample_type = SeqrSample.SAMPLE_TYPE_WGS
counters['wgs_projects'] += 1
elif "rna-seq" in project_names:
sample_type = SeqrSample.SAMPLE_TYPE_RNA
counters['rna_projects'] += 1
else:
sample_type = SeqrSample.SAMPLE_TYPE_WES
counters['wes_projects'] += 1
# transfer Project data
new_project, project_created = transfer_project(source_project)
updated_seqr_project_guids.add(new_project.guid)
if project_created: counters['projects_created'] += 1
# transfer Families and Individuals
source_family_id_to_new_family = {}
for source_family in Family.objects.filter(project=source_project):
new_family, family_created = transfer_family(
source_family, new_project)
updated_seqr_family_guids.add(new_family.guid)
if family_created: counters['families_created'] += 1
source_family_id_to_new_family[source_family.id] = new_family
for source_individual in Individual.objects.filter(family=source_family):
new_individual, individual_created, phenotips_data_retrieved = transfer_individual(
source_individual, new_family, new_project, connect_to_phenotips
)
updated_seqr_individual_guids.add(new_individual.guid)
if individual_created: counters['individuals_created'] += 1
if phenotips_data_retrieved: counters['individuals_data_retrieved_from_phenotips'] += 1
if source_individual.combined_individuals_info:
combined_individuals_info = json.loads(source_individual.combined_individuals_info)
"""
combined_individuals_info json is expected to look like:
{
'WES' : {
'project_id': from_project.project_id,
'family_id': from_f.family_id,
'indiv_id': from_i.indiv_id
},
'WGS' : {
'project_id': from_project.project_id,
'family_id': from_f.family_id,
'indiv_id': from_i.indiv_id
},
'RNA' : {
'project_id': from_project.project_id,
'family_id': from_f.family_id,
'indiv_id': from_i.indiv_id
},
}
"""
for i, sample_type_i, combined_individuals_info_i in enumerate(combined_individuals_info.items()):
source_project_i = Project.objects.get(project_id=combined_individuals_info_i['project_id'])
#source_family_i = Project.objects.get(project_id=combined_individuals_info_i['family_id'])
source_individual_i = Project.objects.get(project_id=combined_individuals_info_i['indiv_id'])
create_sample_records(sample_type_i, source_project_i, source_individual_i, new_project, new_individual, counters)
else:
create_sample_records(sample_type, source_project, source_individual, new_project, new_individual, counters)
#combined_families_info.update({from_project_datatype: {'project_id': from_project.project_id, 'family_id': from_f.family_id}})
# TODO family groups, cohorts
for source_variant_tag_type in ProjectTag.objects.filter(project=source_project).order_by('order'):
new_variant_tag_type, created = get_or_create_variant_tag_type(
source_variant_tag_type, new_project)
for source_variant_tag in VariantTag.objects.filter(project_tag=source_variant_tag_type):
new_family = source_family_id_to_new_family.get(source_variant_tag.family.id if source_variant_tag.family else None)
new_variant_tag, variant_tag_created = get_or_create_variant_tag(
source_variant_tag,
new_family,
new_variant_tag_type
)
if variant_tag_created: counters['variant_tags_created'] += 1
for source_variant_note in VariantNote.objects.filter(project=source_project):
new_family = source_family_id_to_new_family.get(source_variant_note.family.id if source_variant_note.family else None)
new_variant_note, variant_note_created = get_or_create_variant_note(
source_variant_note,
new_project,
new_family
)
if variant_note_created: counters['variant_notes_created'] += 1
# delete projects that are in SeqrIndividual table, but not in BaseProject table
for deprecated_project_id in project_ids_to_process:
for indiv in SeqrIndividual.objects.filter(family__project__deprecated_project_id=deprecated_project_id):
if indiv.guid not in updated_seqr_individual_guids:
print("Deleting SeqrIndividual: %s" % indiv)
indiv.delete()
# delete projects that are in SeqrFamily table, but not in BaseProject table
for f in SeqrFamily.objects.filter(project__deprecated_project_id=deprecated_project_id):
if f.guid not in updated_seqr_family_guids:
print("Deleting SeqrFamily: %s" % f)
f.delete()
# delete projects that are in SeqrProject table, but not in BaseProject table
#for p in SeqrProject.objects.filter():
# if p.guid not in updated_seqr_project_guids:
# while True:
# i = raw_input('Delete SeqrProject %s? [Y/n]' % p.guid)
# if i == 'Y':
# p.delete()
# else:
# print("Keeping %s .." % p.guid)
# break
logger.info("Done")
logger.info("Stats: ")
for k, v in counters.items():
logger.info(" %s: %s" % (k, v))
def create_sample_records(sample_type, source_project, source_individual, new_project, new_individual, counters):
vcf_files = [f for f in source_individual.vcf_files.all()]
vcf_path = None
if len(vcf_files) > 0:
# get the most recent VCF file (the one with the highest primary key
vcf_files_max_pk = max([f.pk for f in vcf_files])
vcf_path = [f.file_path for f in vcf_files if f.pk == vcf_files_max_pk][0]
if vcf_path:
new_sample, sample_created = get_or_create_sample(
source_individual,
new_individual,
sample_type=sample_type
)
if sample_created: counters['samples_created'] += 1
new_vcf_dataset, vcf_dataset_created = get_or_create_dataset(
new_sample,
new_project,
source_individual,
vcf_path,
analysis_type=SeqrDataset.ANALYSIS_TYPE_VARIANT_CALLS,
)
if source_individual.bam_file_path:
new_bam_dataset, bam_dataset_created = get_or_create_dataset(
new_sample,
new_project,
source_individual,
source_individual.bam_file_path,
analysis_type=SeqrDataset.ANALYSIS_TYPE_ALIGNMENT,
)
def update_model_field(model, field_name, new_value):
"""Updates the given field if the new value is different from it's current value.
Args:
model: django ORM model
field_name: name of field to update
new_value: The new value to set the field to
"""
if not hasattr(model, field_name):
raise ValueError("model %s doesn't have the field %s" % (model, field_name))
if getattr(model, field_name) != new_value:
setattr(model, field_name, new_value)
if field_name != 'phenotips_data':
print("Setting %s.%s = %s" % (model.__class__.__name__.encode('utf-8'), field_name.encode('utf-8'), unicode(new_value).encode('utf-8')))
model.save()
def transfer_project(source_project):
"""Transfers the given project and returns the new project"""
# create project
new_project, created = SeqrProject.objects.get_or_create(
deprecated_project_id=source_project.project_id.strip(),
)
if created:
print("Created SeqrSample", new_project)
update_model_field(new_project, 'guid', new_project._compute_guid()[:ModelWithGUID.MAX_GUID_SIZE])
update_model_field(new_project, 'name', (source_project.project_name or source_project.project_id).strip())
update_model_field(new_project, 'description', source_project.description)
update_model_field(new_project, 'deprecated_last_accessed_date', source_project.last_accessed_date)
for p in source_project.private_reference_populations.all():
new_project.custom_reference_populations.add(p)
if source_project.project_id not in settings.PROJECTS_WITHOUT_PHENOTIPS:
update_model_field(new_project, 'is_phenotips_enabled', True)
update_model_field(new_project, 'phenotips_user_id', source_project.project_id)
else:
new_project.is_phenotips_enabled = False
if source_project.project_id in settings.PROJECTS_WITH_MATCHMAKER:
update_model_field(new_project, 'is_mme_enabled', True)
update_model_field(new_project, 'mme_primary_data_owner', settings.MME_PATIENT_PRIMARY_DATA_OWNER[source_project.project_id])
else:
new_project.is_mme_enabled = False
new_project.save()
# grant gene list CAN_VIEW permissions to project collaborators
for source_gene_list in source_project.gene_lists.all():
try:
locus_list = LocusList.objects.get(
created_by=source_gene_list.owner,
name=source_gene_list.name or source_gene_list.slug,
is_public=source_gene_list.is_public,
)
except ObjectDoesNotExist as e:
raise Exception('LocusList "%s" not found. Please run `python manage.py transfer_gene_lists`' % (
source_gene_list.name or source_gene_list.slug))
except MultipleObjectsReturned as e:
logger.error("Multiple LocusLists with owner '%s' and name '%s'" % (
source_gene_list.owner, (source_gene_list.name or source_gene_list.slug))
)
continue
assign_perm(user_or_group=new_project.can_view_group, perm=CAN_VIEW, obj=locus_list)
# add collaborators to new_project.can_view_group and/or can_edit_group
collaborator_user_ids = set()
for collaborator in ProjectCollaborator.objects.filter(project=source_project):
collaborator_user_ids.add( collaborator.user.id )
if collaborator.collaborator_type == 'manager':
new_project.can_edit_group.user_set.add(collaborator.user)
new_project.can_view_group.user_set.add(collaborator.user)
elif collaborator.collaborator_type == 'collaborator':
new_project.can_view_group.user_set.add(collaborator.user)
new_project.can_edit_group.user_set.remove(collaborator.user)
else:
raise ValueError("Unexpected collaborator_type: %s" % collaborator.collaborator_type)
for user in new_project.can_edit_group.user_set.all():
if user.id not in collaborator_user_ids:
new_project.can_view_group.user_set.remove(user)
new_project.can_edit_group.user_set.remove(user)
new_project.owners_group.user_set.remove(user)
print("REMOVED user %s permissions from project %s" % (user, new_project))
return new_project, created
def transfer_family(source_family, new_project):
"""Transfers the given family and returns the new family"""
#new_project.created_date.microsecond = random.randint(0, 10**6 - 1)
new_family, created = SeqrFamily.objects.get_or_create(project=new_project, family_id=source_family.family_id)
if created:
print("Created SeqrSample", new_family)
update_model_field(new_family, 'display_name', source_family.family_name or source_family.family_id)
update_model_field(new_family, 'description', source_family.short_description)
update_model_field(new_family, 'pedigree_image', source_family.pedigree_image)
update_model_field(new_family, 'analysis_notes', source_family.about_family_content)
update_model_field(new_family, 'analysis_summary', source_family.analysis_summary_content)
update_model_field(new_family, 'causal_inheritance_mode', source_family.causal_inheritance_mode)
update_model_field(new_family, 'analysis_status', source_family.analysis_status)
update_model_field(new_family, 'internal_case_review_notes', source_family.internal_case_review_notes)
update_model_field(new_family, 'internal_case_review_summary', source_family.internal_case_review_summary)
return new_family, created
def transfer_individual(source_individual, new_family, new_project, connect_to_phenotips):
"""Transfers the given Individual and returns the new Individual"""
new_individual, created = SeqrIndividual.objects.get_or_create(family=new_family, individual_id=source_individual.indiv_id)
if created:
print("Created SeqrSample", new_individual)
# get rid of '.' to signify 'unknown'
if source_individual.paternal_id == "." or source_individual.maternal_id == "." or source_individual.gender == "." or source_individual.affected == ".":
if source_individual.paternal_id == ".":
source_individual.paternal_id = ""
if source_individual.maternal_id == ".":
source_individual.maternal_id = ""
if source_individual.affected == ".":
source_individual.affected = ""
if source_individual.gender == ".":
source_individual.gender = ""
source_individual.save()
update_model_field(new_individual, 'created_date', source_individual.created_date)
update_model_field(new_individual, 'maternal_id', source_individual.maternal_id)
update_model_field(new_individual, 'paternal_id', source_individual.paternal_id)
update_model_field(new_individual, 'sex', source_individual.gender)
update_model_field(new_individual, 'affected', source_individual.affected)
update_model_field(new_individual, 'display_name', source_individual.nickname or source_individual.indiv_id)
#update_model_field(new_individual, 'notes', source_individual.notes) <-- notes exist only in the new SeqrIndividual schema. other_notes was never really used
update_model_field(new_individual, 'case_review_status', source_individual.case_review_status)
update_model_field(new_individual, 'case_review_status_accepted_for', source_individual.case_review_status_accepted_for)
update_model_field(new_individual, 'phenotips_eid', source_individual.phenotips_id)
update_model_field(new_individual, 'phenotips_data', source_individual.phenotips_data)
# transfer PhenoTips data
phenotips_data_retrieved = False
if connect_to_phenotips and new_project.is_phenotips_enabled:
_retrieve_and_update_individual_phenotips_data(new_project, new_individual)
phenotips_data_retrieved = True
# transfer MME data
if new_project.is_mme_enabled:
mme_data_for_individual = list(
settings.SEQR_ID_TO_MME_ID_MAP.find(
{'seqr_id': new_individual.individual_id}
).sort(
'insertion_date', pymongo.DESCENDING
)
)
if mme_data_for_individual:
submitted_data = mme_data_for_individual[0]['submitted_data']
if submitted_data:
new_individual.mme_submitted_data = json.dumps(submitted_data, default=json_util.default)
new_individual.mme_id = submitted_data['patient']['id']
new_individual.save()
return new_individual, created, phenotips_data_retrieved
def _retrieve_and_update_individual_phenotips_data(project, individual):
"""Retrieve and update the phenotips_data and phenotips_patient_id fields for the given Individual
Args:
project (Model): Project model
individual (Model): Individual model
"""
try:
latest_phenotips_json = phenotips_api.get_patient_data(
project,
individual.phenotips_eid,
is_external_id=True
)
except phenotips_api.PhenotipsException as e:
print("Couldn't retrieve latest data from phenotips for %s: %s" % (individual, e))
return
_update_individual_phenotips_data(individual, latest_phenotips_json)
def get_or_create_sample(source_individual, new_individual, sample_type):
"""Creates and returns a new Sample based on the provided models."""
new_sample, created = SeqrSample.objects.get_or_create(
sample_type=sample_type,
individual=new_individual,
sample_id=(source_individual.vcf_id or source_individual.indiv_id).strip(),
sample_status=source_individual.coverage_status,
deprecated_base_project=source_individual.family.project,
created_date=new_individual.created_date,
)
return new_sample, created
def get_or_create_dataset(new_sample, new_project, source_individual, source_file_path, analysis_type):
new_dataset, created = SeqrDataset.objects.get_or_create(
analysis_type=analysis_type,
source_file_path=source_file_path,
created_date=new_sample.individual.family.project.created_date,
project=new_project,
)
if source_individual.is_loaded():
new_dataset.is_loaded=True
if not new_dataset.loaded_date:
new_dataset.loaded_date = look_up_loaded_date(source_individual)
new_dataset.save()
new_dataset.samples.add(new_sample)
#if created:
# SampleBatch permissions - handled same way as for gene lists, except - since SampleBatch
# currently can't be shared with more than one project, allow SampleBatch metadata to be
# edited by users with project CAN_EDIT permissions
# assign_perm(user_or_group=new_project.can_edit_group, perm=CAN_EDIT, obj=new_sample_batch)
# assign_perm(user_or_group=new_project.can_view_group, perm=CAN_VIEW, obj=new_sample_batch)
return new_dataset, created
def get_or_create_variant_tag_type(source_variant_tag_type, new_project):
new_variant_tag_type, created = SeqrVariantTagType.objects.get_or_create(
project=new_project,
name=source_variant_tag_type.tag,
description=source_variant_tag_type.title,
color=source_variant_tag_type.color,
order=source_variant_tag_type.order,
is_built_in=(source_variant_tag_type.order is not None),
)
return new_variant_tag_type, created
def get_or_create_variant_tag(source_variant_tag, new_family, new_variant_tag_type):
try:
# seqr allowed a user to tag the same variant multiple times, so check if
created = False
new_variant_tag = SeqrVariantTag.objects.get(
variant_tag_type=new_variant_tag_type,
genome_version=GENOME_VERSION_GRCh37,
xpos_start=source_variant_tag.xpos,
xpos_end=source_variant_tag.xpos,
ref=source_variant_tag.ref,
alt=source_variant_tag.alt,
family=new_family,
)
# TODO populate variant_annotation, variant_genotypes
new_variant_tag.search_parameters = source_variant_tag.search_url
new_variant_tag.save()
except ObjectDoesNotExist as e:
created = True
new_variant_tag=SeqrVariantTag.objects.create(
created_date=source_variant_tag.date_saved,
created_by=source_variant_tag.user,
variant_tag_type=new_variant_tag_type,
genome_version=GENOME_VERSION_GRCh37,
xpos_start=source_variant_tag.xpos,
xpos_end=source_variant_tag.xpos,
ref=source_variant_tag.ref,
alt=source_variant_tag.alt,
family=new_family,
) # TODO populate variant_annotation, variant_genotypes
return new_variant_tag, created
def get_or_create_variant_note(source_variant_note, new_project, new_family):
new_variant_note, created = SeqrVariantNote.objects.get_or_create(
created_date=source_variant_note.date_saved,
created_by=source_variant_note.user,
project=new_project,
note=source_variant_note.note,
genome_version=GENOME_VERSION_GRCh37,
xpos_start=source_variant_note.xpos,
xpos_end=source_variant_note.xpos,
ref=source_variant_note.ref,
alt=source_variant_note.alt,
search_parameters=source_variant_note.search_url,
family=new_family,
) # TODO populate variant_annotation, variant_genotypes
return new_variant_note, created
def look_up_loaded_date(source_individual):
"""Retrieve the data-loaded time for the given individual"""
# decode data loaded time
loaded_date = None
try:
datastore = get_datastore(source_individual.project.project_id)
family_collection = datastore._get_family_collection(
source_individual.project.project_id,
source_individual.family.family_id
)
if not family_collection:
logger.error("mongodb family collection not found for %s %s" % (
source_individual.project.project_id,
source_individual.family.family_id))
return
record = family_collection.find_one()
if record:
loaded_date = record['_id'].generation_time
logger.info("%s data-loaded date: %s" % (source_individual.project.project_id, loaded_date))
except Exception as e:
logger.error('Unable to look up loaded_date for %s' % (source_individual,))
logger.error(e)
return loaded_date
def get_seqr_project_from_base_project(base_project):
seqr_projects = SeqrProject.objects.filter(deprecated_project_id = base_project.project_id)
if len(seqr_projects) == 1:
return seqr_projects[0]
return None
def get_seqr_family_from_base_family(base_family):
seqr_families = SeqrFamily.objects.filter(family_id=base_family.family_id, project__deprecated_project_id=base_family.project.project_id)
if len(seqr_families) == 1:
return seqr_families[0]
return None
def get_seqr_individual_from_base_individual(base_individual):
seqr_individual = SeqrIndividual.objects.filter(
individual_id=base_individual.indiv_id,
family__family_id=base_individual.family.family_id,
family__project__deprecated_project_id=base_individual.family.project.project_id
)
if len(seqr_individual) == 1:
return seqr_individual[0]
return None
white space
from bson import json_util
import json
import logging
import pymongo
from tqdm import tqdm
import settings
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db.models import Q
from guardian.shortcuts import assign_perm
from reference_data.models import GENOME_VERSION_GRCh37
from seqr.views.apis import phenotips_api
from seqr.views.apis.phenotips_api import _update_individual_phenotips_data
from xbrowse_server.base.models import \
Project, \
Family, \
FamilyGroup, \
Individual, \
VariantNote, \
ProjectTag, \
VariantTag, \
ProjectCollaborator, \
ReferencePopulation
from seqr.models import \
Project as SeqrProject, \
Family as SeqrFamily, \
Individual as SeqrIndividual, \
VariantTagType as SeqrVariantTagType, \
VariantTag as SeqrVariantTag, \
VariantNote as SeqrVariantNote, \
Sample as SeqrSample, \
Dataset as SeqrDataset, \
LocusList, \
CAN_EDIT, CAN_VIEW, ModelWithGUID
from xbrowse_server.mall import get_datastore, get_annotator
logger = logging.getLogger(__name__)
# switching to python3.6 will make this unnecessary as built-in python dictionaries will be ordered
from collections import OrderedDict, defaultdict
class OrderedDefaultDict(OrderedDict, defaultdict):
def __init__(self, default_factory=None, *args, **kwargs):
super(OrderedDefaultDict, self).__init__(*args, **kwargs)
self.default_factory = default_factory
class Command(BaseCommand):
help = 'Transfer projects to the new seqr schema'
def add_arguments(self, parser):
parser.add_argument('--reset-all-models', help='This flag causes all records to be cleared from the seqr schema\'s Project, Family, and Individual models before transferring data', action='store_true')
parser.add_argument('--dont-connect-to-phenotips', help='dont retrieve phenotips internal id and latest data', action='store_true')
parser.add_argument('-w', '--wgs-projects', help='text file that lists WGS project-ids - one per line')
parser.add_argument('project_id', nargs="*", help='Project(s) to transfer. If not specified, defaults to all projects.')
def handle(self, *args, **options):
"""transfer project"""
reset_all_models = options['reset_all_models']
connect_to_phenotips = not options['dont_connect_to_phenotips']
project_ids_to_process = options['project_id']
counters = OrderedDefaultDict(int)
if reset_all_models:
print("Dropping all records from SeqrProject, SeqrFamily, SeqrIndividual")
SeqrIndividual.objects.all().delete()
SeqrFamily.objects.all().delete()
SeqrProject.objects.all().delete()
# reset models that'll be regenerated
if not project_ids_to_process:
SeqrVariantTagType.objects.all().delete()
SeqrVariantTag.objects.all().delete()
SeqrVariantNote.objects.all().delete()
SeqrSample.objects.all().delete()
SeqrDataset.objects.all().delete()
if project_ids_to_process:
projects = Project.objects.filter(project_id__in=project_ids_to_process)
logging.info("Processing %s projects" % len(projects))
else:
projects = Project.objects.filter(
~Q(project_id__contains="DEPRECATED") &
~Q(project_name__contains="DEPRECATED") &
~Q(project_id__istartswith="temp") &
~Q(project_id__istartswith="test_")
)
logging.info("Processing all %s projects" % len(projects))
wgs_project_ids = {}
if options['wgs_projects']:
with open(options['wgs_projects']) as f:
wgs_project_ids = {line.strip().lower() for line in f if len(line.strip()) > 0}
updated_seqr_project_guids = set()
updated_seqr_family_guids = set()
updated_seqr_individual_guids = set()
for source_project in tqdm(projects, unit=" projects"):
counters['source_projects'] += 1
print("Project: " + source_project.project_id)
# compute sample_type for this project
project_names = ("%s|%s" % (source_project.project_id, source_project.project_name)).lower()
if "wgs" in project_names or "genome" in source_project.project_id.lower() or source_project.project_id.lower() in wgs_project_ids:
sample_type = SeqrSample.SAMPLE_TYPE_WGS
counters['wgs_projects'] += 1
elif "rna-seq" in project_names:
sample_type = SeqrSample.SAMPLE_TYPE_RNA
counters['rna_projects'] += 1
else:
sample_type = SeqrSample.SAMPLE_TYPE_WES
counters['wes_projects'] += 1
# transfer Project data
new_project, project_created = transfer_project(source_project)
updated_seqr_project_guids.add(new_project.guid)
if project_created: counters['projects_created'] += 1
# transfer Families and Individuals
source_family_id_to_new_family = {}
for source_family in Family.objects.filter(project=source_project):
new_family, family_created = transfer_family(
source_family, new_project)
updated_seqr_family_guids.add(new_family.guid)
if family_created: counters['families_created'] += 1
source_family_id_to_new_family[source_family.id] = new_family
for source_individual in Individual.objects.filter(family=source_family):
new_individual, individual_created, phenotips_data_retrieved = transfer_individual(
source_individual, new_family, new_project, connect_to_phenotips
)
updated_seqr_individual_guids.add(new_individual.guid)
if individual_created: counters['individuals_created'] += 1
if phenotips_data_retrieved: counters['individuals_data_retrieved_from_phenotips'] += 1
if source_individual.combined_individuals_info:
combined_individuals_info = json.loads(source_individual.combined_individuals_info)
"""
combined_individuals_info json is expected to look like:
{
'WES' : {
'project_id': from_project.project_id,
'family_id': from_f.family_id,
'indiv_id': from_i.indiv_id
},
'WGS' : {
'project_id': from_project.project_id,
'family_id': from_f.family_id,
'indiv_id': from_i.indiv_id
},
'RNA' : {
'project_id': from_project.project_id,
'family_id': from_f.family_id,
'indiv_id': from_i.indiv_id
},
}
"""
for i, sample_type_i, combined_individuals_info_i in enumerate(combined_individuals_info.items()):
source_project_i = Project.objects.get(project_id=combined_individuals_info_i['project_id'])
#source_family_i = Project.objects.get(project_id=combined_individuals_info_i['family_id'])
source_individual_i = Project.objects.get(project_id=combined_individuals_info_i['indiv_id'])
create_sample_records(sample_type_i, source_project_i, source_individual_i, new_project, new_individual, counters)
else:
create_sample_records(sample_type, source_project, source_individual, new_project, new_individual, counters)
#combined_families_info.update({from_project_datatype: {'project_id': from_project.project_id, 'family_id': from_f.family_id}})
# TODO family groups, cohorts
for source_variant_tag_type in ProjectTag.objects.filter(project=source_project).order_by('order'):
new_variant_tag_type, created = get_or_create_variant_tag_type(
source_variant_tag_type, new_project)
for source_variant_tag in VariantTag.objects.filter(project_tag=source_variant_tag_type):
new_family = source_family_id_to_new_family.get(source_variant_tag.family.id if source_variant_tag.family else None)
new_variant_tag, variant_tag_created = get_or_create_variant_tag(
source_variant_tag,
new_family,
new_variant_tag_type
)
if variant_tag_created: counters['variant_tags_created'] += 1
for source_variant_note in VariantNote.objects.filter(project=source_project):
new_family = source_family_id_to_new_family.get(source_variant_note.family.id if source_variant_note.family else None)
new_variant_note, variant_note_created = get_or_create_variant_note(
source_variant_note,
new_project,
new_family
)
if variant_note_created: counters['variant_notes_created'] += 1
# delete projects that are in SeqrIndividual table, but not in BaseProject table
for deprecated_project_id in project_ids_to_process:
for indiv in SeqrIndividual.objects.filter(family__project__deprecated_project_id=deprecated_project_id):
if indiv.guid not in updated_seqr_individual_guids:
print("Deleting SeqrIndividual: %s" % indiv)
indiv.delete()
# delete projects that are in SeqrFamily table, but not in BaseProject table
for f in SeqrFamily.objects.filter(project__deprecated_project_id=deprecated_project_id):
if f.guid not in updated_seqr_family_guids:
print("Deleting SeqrFamily: %s" % f)
f.delete()
# delete projects that are in SeqrProject table, but not in BaseProject table
#for p in SeqrProject.objects.filter():
# if p.guid not in updated_seqr_project_guids:
# while True:
# i = raw_input('Delete SeqrProject %s? [Y/n]' % p.guid)
# if i == 'Y':
# p.delete()
# else:
# print("Keeping %s .." % p.guid)
# break
logger.info("Done")
logger.info("Stats: ")
for k, v in counters.items():
logger.info(" %s: %s" % (k, v))
def create_sample_records(sample_type, source_project, source_individual, new_project, new_individual, counters):
vcf_files = [f for f in source_individual.vcf_files.all()]
vcf_path = None
if len(vcf_files) > 0:
# get the most recent VCF file (the one with the highest primary key
vcf_files_max_pk = max([f.pk for f in vcf_files])
vcf_path = [f.file_path for f in vcf_files if f.pk == vcf_files_max_pk][0]
if vcf_path:
new_sample, sample_created = get_or_create_sample(
source_individual,
new_individual,
sample_type=sample_type
)
if sample_created: counters['samples_created'] += 1
new_vcf_dataset, vcf_dataset_created = get_or_create_dataset(
new_sample,
new_project,
source_individual,
vcf_path,
analysis_type=SeqrDataset.ANALYSIS_TYPE_VARIANT_CALLS,
)
if source_individual.bam_file_path:
new_bam_dataset, bam_dataset_created = get_or_create_dataset(
new_sample,
new_project,
source_individual,
source_individual.bam_file_path,
analysis_type=SeqrDataset.ANALYSIS_TYPE_ALIGNMENT,
)
def update_model_field(model, field_name, new_value):
"""Updates the given field if the new value is different from it's current value.
Args:
model: django ORM model
field_name: name of field to update
new_value: The new value to set the field to
"""
if not hasattr(model, field_name):
raise ValueError("model %s doesn't have the field %s" % (model, field_name))
if getattr(model, field_name) != new_value:
setattr(model, field_name, new_value)
if field_name != 'phenotips_data':
print("Setting %s.%s = %s" % (model.__class__.__name__.encode('utf-8'), field_name.encode('utf-8'), unicode(new_value).encode('utf-8')))
model.save()
def transfer_project(source_project):
"""Transfers the given project and returns the new project"""
# create project
new_project, created = SeqrProject.objects.get_or_create(
deprecated_project_id=source_project.project_id.strip(),
)
if created:
print("Created SeqrSample", new_project)
update_model_field(new_project, 'guid', new_project._compute_guid()[:ModelWithGUID.MAX_GUID_SIZE])
update_model_field(new_project, 'name', (source_project.project_name or source_project.project_id).strip())
update_model_field(new_project, 'description', source_project.description)
update_model_field(new_project, 'deprecated_last_accessed_date', source_project.last_accessed_date)
for p in source_project.private_reference_populations.all():
new_project.custom_reference_populations.add(p)
if source_project.project_id not in settings.PROJECTS_WITHOUT_PHENOTIPS:
update_model_field(new_project, 'is_phenotips_enabled', True)
update_model_field(new_project, 'phenotips_user_id', source_project.project_id)
else:
new_project.is_phenotips_enabled = False
if source_project.project_id in settings.PROJECTS_WITH_MATCHMAKER:
update_model_field(new_project, 'is_mme_enabled', True)
update_model_field(new_project, 'mme_primary_data_owner', settings.MME_PATIENT_PRIMARY_DATA_OWNER[source_project.project_id])
else:
new_project.is_mme_enabled = False
new_project.save()
# grant gene list CAN_VIEW permissions to project collaborators
for source_gene_list in source_project.gene_lists.all():
try:
locus_list = LocusList.objects.get(
created_by=source_gene_list.owner,
name=source_gene_list.name or source_gene_list.slug,
is_public=source_gene_list.is_public,
)
except ObjectDoesNotExist as e:
raise Exception('LocusList "%s" not found. Please run `python manage.py transfer_gene_lists`' % (
source_gene_list.name or source_gene_list.slug))
except MultipleObjectsReturned as e:
logger.error("Multiple LocusLists with owner '%s' and name '%s'" % (
source_gene_list.owner, (source_gene_list.name or source_gene_list.slug))
)
continue
assign_perm(user_or_group=new_project.can_view_group, perm=CAN_VIEW, obj=locus_list)
# add collaborators to new_project.can_view_group and/or can_edit_group
collaborator_user_ids = set()
for collaborator in ProjectCollaborator.objects.filter(project=source_project):
collaborator_user_ids.add( collaborator.user.id )
if collaborator.collaborator_type == 'manager':
new_project.can_edit_group.user_set.add(collaborator.user)
new_project.can_view_group.user_set.add(collaborator.user)
elif collaborator.collaborator_type == 'collaborator':
new_project.can_view_group.user_set.add(collaborator.user)
new_project.can_edit_group.user_set.remove(collaborator.user)
else:
raise ValueError("Unexpected collaborator_type: %s" % collaborator.collaborator_type)
for user in new_project.can_edit_group.user_set.all():
if user.id not in collaborator_user_ids:
new_project.can_view_group.user_set.remove(user)
new_project.can_edit_group.user_set.remove(user)
new_project.owners_group.user_set.remove(user)
print("REMOVED user %s permissions from project %s" % (user, new_project))
return new_project, created
def transfer_family(source_family, new_project):
"""Transfers the given family and returns the new family"""
#new_project.created_date.microsecond = random.randint(0, 10**6 - 1)
new_family, created = SeqrFamily.objects.get_or_create(project=new_project, family_id=source_family.family_id)
if created:
print("Created SeqrSample", new_family)
update_model_field(new_family, 'display_name', source_family.family_name or source_family.family_id)
update_model_field(new_family, 'description', source_family.short_description)
update_model_field(new_family, 'pedigree_image', source_family.pedigree_image)
update_model_field(new_family, 'analysis_notes', source_family.about_family_content)
update_model_field(new_family, 'analysis_summary', source_family.analysis_summary_content)
update_model_field(new_family, 'causal_inheritance_mode', source_family.causal_inheritance_mode)
update_model_field(new_family, 'analysis_status', source_family.analysis_status)
update_model_field(new_family, 'internal_case_review_notes', source_family.internal_case_review_notes)
update_model_field(new_family, 'internal_case_review_summary', source_family.internal_case_review_summary)
return new_family, created
def transfer_individual(source_individual, new_family, new_project, connect_to_phenotips):
"""Transfers the given Individual and returns the new Individual"""
new_individual, created = SeqrIndividual.objects.get_or_create(family=new_family, individual_id=source_individual.indiv_id)
if created:
print("Created SeqrSample", new_individual)
# get rid of '.' to signify 'unknown'
if source_individual.paternal_id == "." or source_individual.maternal_id == "." or source_individual.gender == "." or source_individual.affected == ".":
if source_individual.paternal_id == ".":
source_individual.paternal_id = ""
if source_individual.maternal_id == ".":
source_individual.maternal_id = ""
if source_individual.affected == ".":
source_individual.affected = ""
if source_individual.gender == ".":
source_individual.gender = ""
source_individual.save()
update_model_field(new_individual, 'created_date', source_individual.created_date)
update_model_field(new_individual, 'maternal_id', source_individual.maternal_id)
update_model_field(new_individual, 'paternal_id', source_individual.paternal_id)
update_model_field(new_individual, 'sex', source_individual.gender)
update_model_field(new_individual, 'affected', source_individual.affected)
update_model_field(new_individual, 'display_name', source_individual.nickname or source_individual.indiv_id)
#update_model_field(new_individual, 'notes', source_individual.notes) <-- notes exist only in the new SeqrIndividual schema. other_notes was never really used
update_model_field(new_individual, 'case_review_status', source_individual.case_review_status)
update_model_field(new_individual, 'case_review_status_accepted_for', source_individual.case_review_status_accepted_for)
update_model_field(new_individual, 'phenotips_eid', source_individual.phenotips_id)
update_model_field(new_individual, 'phenotips_data', source_individual.phenotips_data)
# transfer PhenoTips data
phenotips_data_retrieved = False
if connect_to_phenotips and new_project.is_phenotips_enabled:
_retrieve_and_update_individual_phenotips_data(new_project, new_individual)
phenotips_data_retrieved = True
# transfer MME data
if new_project.is_mme_enabled:
mme_data_for_individual = list(
settings.SEQR_ID_TO_MME_ID_MAP.find(
{'seqr_id': new_individual.individual_id}
).sort(
'insertion_date', pymongo.DESCENDING
)
)
if mme_data_for_individual:
submitted_data = mme_data_for_individual[0]['submitted_data']
if submitted_data:
new_individual.mme_submitted_data = json.dumps(submitted_data, default=json_util.default)
new_individual.mme_id = submitted_data['patient']['id']
new_individual.save()
return new_individual, created, phenotips_data_retrieved
def _retrieve_and_update_individual_phenotips_data(project, individual):
"""Retrieve and update the phenotips_data and phenotips_patient_id fields for the given Individual
Args:
project (Model): Project model
individual (Model): Individual model
"""
try:
latest_phenotips_json = phenotips_api.get_patient_data(
project,
individual.phenotips_eid,
is_external_id=True
)
except phenotips_api.PhenotipsException as e:
print("Couldn't retrieve latest data from phenotips for %s: %s" % (individual, e))
return
_update_individual_phenotips_data(individual, latest_phenotips_json)
def get_or_create_sample(source_individual, new_individual, sample_type):
"""Creates and returns a new Sample based on the provided models."""
new_sample, created = SeqrSample.objects.get_or_create(
sample_type=sample_type,
individual=new_individual,
sample_id=(source_individual.vcf_id or source_individual.indiv_id).strip(),
sample_status=source_individual.coverage_status,
deprecated_base_project=source_individual.family.project,
created_date=new_individual.created_date,
)
return new_sample, created
def get_or_create_dataset(new_sample, new_project, source_individual, source_file_path, analysis_type):
new_dataset, created = SeqrDataset.objects.get_or_create(
analysis_type=analysis_type,
source_file_path=source_file_path,
created_date=new_sample.individual.family.project.created_date,
project=new_project,
)
if source_individual.is_loaded():
new_dataset.is_loaded=True
if not new_dataset.loaded_date:
new_dataset.loaded_date = look_up_loaded_date(source_individual)
new_dataset.save()
new_dataset.samples.add(new_sample)
#if created:
# SampleBatch permissions - handled same way as for gene lists, except - since SampleBatch
# currently can't be shared with more than one project, allow SampleBatch metadata to be
# edited by users with project CAN_EDIT permissions
# assign_perm(user_or_group=new_project.can_edit_group, perm=CAN_EDIT, obj=new_sample_batch)
# assign_perm(user_or_group=new_project.can_view_group, perm=CAN_VIEW, obj=new_sample_batch)
return new_dataset, created
def get_or_create_variant_tag_type(source_variant_tag_type, new_project):
new_variant_tag_type, created = SeqrVariantTagType.objects.get_or_create(
project=new_project,
name=source_variant_tag_type.tag,
description=source_variant_tag_type.title,
color=source_variant_tag_type.color,
order=source_variant_tag_type.order,
is_built_in=(source_variant_tag_type.order is not None),
)
return new_variant_tag_type, created
def get_or_create_variant_tag(source_variant_tag, new_family, new_variant_tag_type):
try:
# seqr allowed a user to tag the same variant multiple times, so check if
created = False
new_variant_tag = SeqrVariantTag.objects.get(
variant_tag_type=new_variant_tag_type,
genome_version=GENOME_VERSION_GRCh37,
xpos_start=source_variant_tag.xpos,
xpos_end=source_variant_tag.xpos,
ref=source_variant_tag.ref,
alt=source_variant_tag.alt,
family=new_family,
)
# TODO populate variant_annotation, variant_genotypes
new_variant_tag.search_parameters = source_variant_tag.search_url
new_variant_tag.save()
except ObjectDoesNotExist as e:
created = True
new_variant_tag=SeqrVariantTag.objects.create(
created_date=source_variant_tag.date_saved,
created_by=source_variant_tag.user,
variant_tag_type=new_variant_tag_type,
genome_version=GENOME_VERSION_GRCh37,
xpos_start=source_variant_tag.xpos,
xpos_end=source_variant_tag.xpos,
ref=source_variant_tag.ref,
alt=source_variant_tag.alt,
family=new_family,
) # TODO populate variant_annotation, variant_genotypes
return new_variant_tag, created
def get_or_create_variant_note(source_variant_note, new_project, new_family):
new_variant_note, created = SeqrVariantNote.objects.get_or_create(
created_date=source_variant_note.date_saved,
created_by=source_variant_note.user,
project=new_project,
note=source_variant_note.note,
genome_version=GENOME_VERSION_GRCh37,
xpos_start=source_variant_note.xpos,
xpos_end=source_variant_note.xpos,
ref=source_variant_note.ref,
alt=source_variant_note.alt,
search_parameters=source_variant_note.search_url,
family=new_family,
) # TODO populate variant_annotation, variant_genotypes
return new_variant_note, created
def look_up_loaded_date(source_individual):
"""Retrieve the data-loaded time for the given individual"""
# decode data loaded time
loaded_date = None
try:
datastore = get_datastore(source_individual.project.project_id)
family_collection = datastore._get_family_collection(
source_individual.project.project_id,
source_individual.family.family_id
)
if not family_collection:
logger.error("mongodb family collection not found for %s %s" % (
source_individual.project.project_id,
source_individual.family.family_id))
return
record = family_collection.find_one()
if record:
loaded_date = record['_id'].generation_time
logger.info("%s data-loaded date: %s" % (source_individual.project.project_id, loaded_date))
except Exception as e:
logger.error('Unable to look up loaded_date for %s' % (source_individual,))
logger.error(e)
return loaded_date
def get_seqr_project_from_base_project(base_project):
seqr_projects = SeqrProject.objects.filter(deprecated_project_id = base_project.project_id)
if len(seqr_projects) == 1:
return seqr_projects[0]
return None
def get_seqr_family_from_base_family(base_family):
seqr_families = SeqrFamily.objects.filter(family_id=base_family.family_id, project__deprecated_project_id=base_family.project.project_id)
if len(seqr_families) == 1:
return seqr_families[0]
return None
def get_seqr_individual_from_base_individual(base_individual):
seqr_individual = SeqrIndividual.objects.filter(
individual_id=base_individual.indiv_id,
family__family_id=base_individual.family.family_id,
family__project__deprecated_project_id=base_individual.family.project.project_id
)
if len(seqr_individual) == 1:
return seqr_individual[0]
return None
|
#!/usr/bin/python
'''
A Crappy HHVM Compiler
run with ./hhvm_build debian jessie 3.3
'''
import os
import sys
import yaml
import tempfile
import re
import shutil
import logging
import subprocess
import datetime
import glob
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(script_dir, 'lib', 'buildtools'))
from buildtools import *
from buildtools import os_utils, http
from buildtools.config import YAMLConfig
from buildtools.wrapper import CMake, FPM, configure_ccache, configure_cotire, configure_distcc
from buildtools.repo.git import GitRepository
from buildtools.posix.elf import ELFInfo
BOOST_LIBRARYDIR='/usr/lib/x86_64-linux-gnu'
def bool2yn(b):
return 'Y' if b else 'N'
def cleanDir(dir):
for the_file in os.listdir(dir):
file_path = os.path.join(dir, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
else:
shutil.rmtree(file_path)
except Exception, e:
log.error(e)
sys.exit(1)
def mkdirOrClear(dir):
if not os.path.isdir(dir):
log.info('Creating {}'.format(dir))
os.makedirs(dir)
else:
log.info('Clearing %s', dir)
cleanDir(dir)
def dictToTuples(inp):
return [(k, v) for k, v in inp.items()]
def handleIf(operators, pkg_cfg, var_replacements):
if len(operators):
for expr in operators:
(operator, args) = dictToTuples(expr)[0]
if isinstance(args, str):
args = args.split(' ')
args = [replace_vars(arg, var_replacements) for arg in args]
if operator == 'file-exists':
if not os.path.exists(args[0]):
return False
else:
log.warn('Invalid operator %s', operator)
return True
else:
return False
def handleChdir(operators, pkg_cfg, var_replacements):
if len(operators):
origpath = os.path.abspath(os.getcwd())
def jumpBack():
log.info('cd %s', origpath)
os.chdir(origpath)
for expr in operators:
(operator, args) = dictToTuples(expr)[0]
if isinstance(args, str):
args = args.split(' ')
args = [replace_vars(arg, var_replacements) for arg in args]
newdir = os.path.abspath(args[0])
if operator == 'dir':
if not os.path.isdir(newdir):
log.warn('Directory %s does not exist, cannot chdir.', newdir)
return (False, None)
else:
os.chdir(newdir)
log.info('cd %s', newdir)
return (True, jumpBack)
else:
log.warn('Invalid operator %s', operator)
log.info('cd %s', args[0])
return (True, jumpBack)
else:
return (False, None)
def RunCommandsIn(commandlist, pkg_cfg, var_replacements):
if len(commandlist) == 0:
return
with log:
for package_cmd in commandlist:
# Conditionals
if isinstance(package_cmd, dict):
result = None
postwork = None
if 'if' in package_cmd:
result = handleIf(package_cmd['if'], pkg_cfg, var_replacements)
if 'if-not' in package_cmd:
result = not handleIf(package_cmd['if-not'], pkg_cfg, var_replacements)
if 'chdir' in package_cmd:
result, origpath = handleChdir(package_cmd['chdir'], pkg_cfg, var_replacements)
if result is None:
continue
RunCommandsIn(package_cmd.get('then' if result else 'else', []), pkg_cfg, var_replacements)
if postwork is not None:
origpath()
continue
# Strings -> lists
if isinstance(package_cmd, str):
package_cmd = package_cmd.split(' ')
ccmd = [replace_vars(fragment, var_replacements) for fragment in package_cmd]
command = ccmd[0]
cmd(ccmd, echo=True, critical=True)
def aggregate(cfg, dir):
job_cfg = yaml.load(os.path.join(dir, 'package.yml'))
def CloneOrPull(id, uri, dir):
if not os.path.isdir(dir):
cmd(['git', 'clone', uri, dir], echo=True, show_output=True, critical=True)
else:
with os_utils.Chdir(dir):
cmd(['git', 'pull'], echo=True, show_output=True, critical=True)
with os_utils.Chdir(dir):
log.info('{} is now at commit {}.'.format(id, Git.GetCommit()))
if __name__ == '__main__':
import argparse
# logging.basicConfig(
# format='%(asctime)s [%(levelname)-8s]: %(message)s',
# datefmt='%m/%d/%Y %I:%M:%S %p',
# level=logging.INFO)
# # filename='logs/main.log',
# # filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# logging.getLogger('').addHandler(console)
# log = IndentLogger()
d_cfg = {
'env': {
'distcc': {
'enabled': False,
'hosts': {}
},
'ccache': {
'enabled': False
},
'cotire': {
'enabled': False
},
'make': {
'jobs': 5,
'flags': []
},
'packaging': {
'enabled': True,
'repo-deploy': True,
'maintainer': 'Rob Nelson <nexisentertainment@gmail.com>',
'packages': {
'debian/jessie': True,
'debian/wheezy': False
},
}
},
'bin': {
'make': 'make',
'pump': 'distcc-pump',
'ccache': 'ccache',
'asm': 'cc',
'cc': 'gcc-4.8',
'cxx': 'g++-4.8'
},
'paths': {
'source': './hhvm_src',
'install': '/tmp/hhvm-install',
'package': '/tmp/hhvm-package'
}
}
argp = argparse.ArgumentParser(prog='hhvm_build', description='Build HHVM')
argp.add_argument('distro', type=str, help='Linux Distribution (deb, etc)')
argp.add_argument('release', type=str, help='OS Release codename (precise, etc)')
argp.add_argument('version', type=str, help='HHVM Version')
argp.add_argument('hhvm_job', type=str, help='HHVM Jenkins workspace')
argp.add_argument('-c', '--config', type=str, default='config.yml', help='YAML file to read configuration from.')
argp.add_argument('--disable-ccache', action='store_true')
argp.add_argument('--disable-distcc', action='store_true')
argp.add_argument('--disable-cotire', action='store_true')
argp.add_argument('--package-only', action='store_true')
argp.add_argument('--disable-packaging', action='store_true')
argp.add_argument('--disable-repo-deploy', action='store_true')
argp.add_argument('--disable-git-clean', action='store_true')
argp.add_argument('--force-rebuild', action='store_true')
args = argp.parse_args()
cfg = Config(args.config, d_cfg)
if args.disable_ccache:
cfg['env']['ccache']['enable'] = False
if args.disable_distcc:
cfg['env']['distcc']['enable'] = False
if args.disable_cotire:
cfg['env']['cotire']['enable'] = False
if args.disable_packaging:
cfg['env']['packaging']['enable'] = False
if args.disable_repo_deploy:
cfg['env']['packaging']['repo-deploy'] = False
DISTRO_NAME = args.distro
DISTRO_RELEASE = args.release
HHVMBUILD_DIR = os.getcwd()
DISTRO_DIR = os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE)
HHVM_VERSION = args.version
SOURCE_DIR = os.path.abspath(args.hhvm_job)
if not os.path.isdir(DISTRO_DIR):
logging.fatal('Directory {0} doesn\'t exist.'.format(DISTRO_DIR))
sys.exit(1)
if SOURCE_DIR is None or not os.path.isdir(SOURCE_DIR):
SOURCE_DIR = os.path.abspath(cfg.get('paths.source'))
if not os.path.isdir(SOURCE_DIR):
log.info('Source code not found. SOURCE_DIR={}'.format(SOURCE_DIR))
sys.exit(1)
INSTALL_DIR = os.path.abspath(cfg.get('paths.install'))
mkdirOrClear(INSTALL_DIR)
PACKAGE_DIR = os.path.abspath(cfg.get('paths.package'))
mkdirOrClear(PACKAGE_DIR)
NIGHTLY = False
DEVONLY = False
DEBUG = False
version_chunks = HHVM_VERSION.split('-')
new_version_chunks = []
for i in range(len(version_chunks)):
chunk = version_chunks[i]
if i == 1 and chunk == 'nightly':
NIGHTLY = True
# new_version_chunks += [chunk]
continue
else:
if chunk == 'dev':
DEVONLY = True
DEBUG = True
continue
if chunk == 'dbg':
DEBUG = True
continue
new_version_chunks += [chunk]
HHVM_VERSION = '-'.join(new_version_chunks)
log.info('HHVM Version {} - Debug: {}, Dev: {}, Nightly: {}'.format(HHVM_VERSION, bool2yn(DEBUG), bool2yn(DEVONLY), bool2yn(NIGHTLY)))
env_ext = {
'CC': cfg.get('bin.cc', 'gcc-4.8'),
'CXX': cfg.get('bin.cxx', 'g++-4.8'),
'ASM': cfg.get('bin.asm', 'cc'),
'CMAKE_INCLUDE_PATH': tempfile.NamedTemporaryFile(delete=False).name,
'CMAKE_LIBRARY_PATH': "/usr/lib/hhvm/",
'HPHP_HOME': SOURCE_DIR,
'MYSQL_UNIX_SOCK_ADDR': '/var/run/mysqld/mysqld.sock',
}
ENV.merge(env_ext)
cmake = CMake()
for k, v in cfg.get('env.cmake.flags', {}).items():
cmake.setFlag(k, v)
MAKE_FLAGS = cfg.get('env.make.flags', [])
cmake.setFlag('CMAKE_BUILD_TYPE', 'Debug' if DEBUG else 'Release')
cmake.setFlag('CMAKE_INSTALL_PREFIX', '/usr')
cmake.setFlag('BOOST_LIBRARYDIR', BOOST_LIBRARYDIR)
configure_ccache(cfg, cmake)
configure_distcc(cfg, cmake)
configure_cotire(cfg, cmake)
job_flag = '-j' + str(cfg.get('env.make.jobs', 1))
MAKE_FLAGS += [job_flag]
#NIGHTLY_DATE = datetime.datetime.utcnow().strftime('%Y.%m.%d')
# 20160102
NIGHTLY_DATE = datetime.datetime.utcnow().strftime('%Y%m%d')
VERSION=''
iteration = int(os.environ.get('BUILD_NUMBER', '1'))
NIGHTLY_DATE += '{:04d}'.format(iteration)
# End format: YYYYMMDDBB - BB being build number
repo = GitRepository(SOURCE_DIR, None)
with Chdir(SOURCE_DIR) as sourcedir:
with log.info('Compile environment:'):
cmd(['uname', '-a'], echo=False)
cmd(['lsb_release', '-a'], echo=False)
cmd(['git', 'log', '-n', '1', '--pretty=oneline'], echo=False)
hhvm_bin = os.path.join(SOURCE_DIR, 'hphp/hhvm/hhvm')
rebuild = args.force_rebuild
if not rebuild and not os.path.isfile(hhvm_bin):
log.warn('hhvm binaries not found.')
rebuild = True
if rebuild and args.package_only:
log.error('Nothing to package, aborting.')
sys.exit(1)
if rebuild:
with log.info('Preparing to compile...'):
branch = ''
if NIGHTLY:
branch = 'master'
REG_VERSION_CHUNK = re.compile(r'# define (HHVM_VERSION_[A-Z]+) (.*)$')
def get_version_chunk(line):
m = REG_VERSION_CHUNK.match(line.strip())
return m.group(2)
version_file = ''
#with open('hphp/system/idl/constants.idl.json', 'r') as f:
# version_file = f.read()
major=0
minor=0
patch=0
suffix=''
with open('hphp/runtime/version.h','r') as f:
#ifndef HHVM_VERSION_OVERRIDE
# define HHVM_VERSION_MAJOR 3
# define HHVM_VERSION_MINOR 15
# define HHVM_VERSION_PATCH 0
# define HHVM_VERSION_SUFFIX "-dev"
#endif
for line in f:
if line.startswith('# define HHVM_VERSION_MAJOR'):
major=int(get_version_chunk(line))
if line.startswith('# define HHVM_VERSION_MINOR'):
minor=int(get_version_chunk(line))
if line.startswith('# define HHVM_VERSION_PATCH'):
patch=int(get_version_chunk(line))
if line.startswith('# define HHVM_VERSION_SUFFIX'):
suffix=get_version_chunk(line).strip('"')
HHVM_VERSION='{0}.{1}.{2}{3}+{4}'.format(major,minor,patch,suffix,NIGHTLY_DATE)
log.info('HHVM Version set: %s',HHVM_VERSION)
cmake.setFlag('HHVM_VERSION_OVERRIDE',HHVM_VERSION)
#with open('hphp/system/idl/constants.idl.json', 'w') as f:
# f.write(REG_VERSION.sub('\1+' + NIGHTLY_DATE, version_file))
else:
branch = 'HHVM-{}'.format(HHVM_VERSION)
repo.quiet = False
repo.CheckForUpdates(remote='origin', branch=branch, quiet=False)
repo.Pull(remote='origin', branch=branch, cleanup=not args.disable_git_clean)
repo.UpdateSubmodules()
distro_info = os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE, 'package.yml')
distro_cfg = YAMLConfig(distro_info, template_dir='/', variables={
'SOURCE_DIR': SOURCE_DIR,
'DISTRO_DIR': os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE),
'HHVMBUILD_DIR': HHVMBUILD_DIR
})
pbsteps = distro_cfg.get('prebuild', [])
if pbsteps is None:
pbsteps = []
if len(pbsteps) > 0:
log.info('Performing prebuild steps...')
origpath = os.path.abspath(os.getcwd())
RunCommandsIn(distro_cfg.get('prebuild', []), distro_cfg, {
'SOURCE_DIR': SOURCE_DIR,
'DISTRO_DIR': os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE),
'HHVMBUILD_DIR': HHVMBUILD_DIR
})
os.chdir(origpath)
if not cmake.run(cfg.get('bin.cmake', 'cmake')):
sys.exit(1)
if not cmd([cfg.get('bin.make', 'make')] + MAKE_FLAGS, critical=True, echo=True):
sys.exit(1)
if not os.path.isfile(hhvm_bin):
log.critical(hhvm_bin + " doesn't exist")
sys.exit(1)
if cfg.get('env.packaging.enabled', False):
stdout, stderr = cmd_output('dpkg-architecture -qDEB_BUILD_GNU_TYPE'.split(), critical=True, echo=False)
ARCH = (stdout + stderr).strip()
for build_type in ['main', 'dev']:
if build_type == 'dev':
DEVONLY = True
if DEBUG:
continue
pkgname = 'hhvm'
skeletondirname = 'skeleton'
suffix = ''
if DEVONLY:
pkgname += '-dev'
suffix = '-dev'
if NIGHTLY:
pkgname += '-nightly'
if DEBUG and not DEVONLY:
pkgname += '-dbg'
with log.info('Packaging {}...'.format(pkgname)):
d_pkg_cfg = {
'make-workspace': [],
'fpm': {
'output-type': 'deb'
}
}
skeletondirname += suffix
pkginfo_dir = os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE + suffix)
pkg_cfg = Config(os.path.join(pkginfo_dir, 'package.yml'), d_pkg_cfg, template_dir='/', variables={
'SOURCE_DIR': SOURCE_DIR,
'DISTRO_DIR': DISTRO_DIR,
'INSTALL_DIR': INSTALL_DIR,
'PACKAGE_DIR': PACKAGE_DIR,
'DISTRO_DIR': DISTRO_DIR,
'SKELETON_DIR': os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, skeletondirname),
'HHVMBUILD_DIR': HHVMBUILD_DIR,
'ARCH': ARCH
})
with Chdir(SOURCE_DIR) as sourcedir:
cmd([cfg.get('bin.make', 'make'), 'install', 'DESTDIR=' + INSTALL_DIR], critical=True)
if NIGHTLY:
version = NIGHTLY_DATE + '~' + ('debug' if DEBUG else 'release')
if DEBUG:
conflicts = replaces = ['hhvm', 'hhvm-nightly', 'hhvm-dbg']
else:
conflicts = replaces = ['hhvm', 'hhvm-dbg']
else:
if DEBUG:
conflicts = replaces = ['hhvm']
version = HHVM_VERSION + '~' + ('debug' if DEBUG else 'release')
if len(pkg_cfg.get('make-workspace', [])) > 0:
log.info('Prepping workspace{} for packaging...'.format(suffix))
RunCommandsIn(pkg_cfg.get('make-workspace', []), pkg_cfg, {})
package = ''
pkgVersion = ''
with Chdir(PACKAGE_DIR):
fpm = FPM()
fpm.input_type = 'dir'
fpm.output_type = 'deb'
with log.info('Loading ' + DISTRO_DIR + '/DEBIAN/control...'):
fpm.LoadControl(DISTRO_DIR + '/DEBIAN/control')
skeledir = os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, skeletondirname)
with log.info('Loading {}...'.format(skeledir)):
fpm.LoadDebianDirectory(os.path.join(skeledir, "DEBIAN"))
with log.info('Figuring out version number...'):
while True:
pkgVersion = version
if iteration > 0:
pkgVersion += '-{}'.format(iteration)
package = os.path.join(HHVMBUILD_DIR, '{name}_{version}.{arch}.deb'.format(name=pkgname, version=pkgVersion, arch=fpm.architecture))
if not os.path.isfile(package):
break
log.warn('%s exists, increasing iterator and trying again.', package)
iterator += 1
log.info('package=%s', package)
if build_type != 'dev':
with log.info('Determining package dependencies...'):
shlib_data = os_utils.GetDpkgShlibs([PACKAGE_DIR + '/usr/bin/hhvm'])
fpm.dependencies = shlib_data['Depends']
log.info('fpm.dependencies = ' + repr(fpm.dependencies))
with log.info('Running FPM...'):
fpm.version = str(version)
fpm.maintainer = cfg.get('env.packaging.maintainer', 'NOT SET <lol@idk.local>')
fpm.name = pkgname
# fpm.provides = ['hhvm'+suffix]
fpm.conflicts = conflicts
if build_type != 'dev':
fpm.configs += ['etc/']
fpm.iteration = iteration
fpm.replaces = replaces
fpm.inputs = ['.']
fpm.workdir = PACKAGE_DIR
fpm.Build(str(package))
if cfg.get('env.packaging.repo-deploy', True):
with log.info('Adding package to repo...'):
cmd(['freight-add', package, 'apt/' + DISTRO_RELEASE], critical=True)
with log.info('Generating repository cache...'):
cmd(['freight-cache', '-p', '~/.gpass'])
with log.info('Serializing configuration for extension use...'):
ext_cfg = {
'hhvm_version': version,
'cmake_flags': cmake.flags,
'make_flags': MAKE_FLAGS,
'env_ext': env_ext,
}
with open('ext.cfg', 'w') as f:
yaml.dump(ext_cfg, f)
# extcfg = cfg.get('paths.exts',{})
# for name, extpath in extcfg:
# buildExt(name, extpath)
Disable Gold.
#!/usr/bin/python
'''
A Crappy HHVM Compiler
run with ./hhvm_build debian jessie 3.3
'''
import os
import sys
import yaml
import tempfile
import re
import shutil
import logging
import subprocess
import datetime
import glob
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(script_dir, 'lib', 'buildtools'))
from buildtools import *
from buildtools import os_utils, http
from buildtools.config import YAMLConfig
from buildtools.wrapper import CMake, FPM, configure_ccache, configure_cotire, configure_distcc
from buildtools.repo.git import GitRepository
from buildtools.posix.elf import ELFInfo
BOOST_LIBRARYDIR='/usr/lib/x86_64-linux-gnu'
def bool2yn(b):
return 'Y' if b else 'N'
def cleanDir(dir):
for the_file in os.listdir(dir):
file_path = os.path.join(dir, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
else:
shutil.rmtree(file_path)
except Exception, e:
log.error(e)
sys.exit(1)
def mkdirOrClear(dir):
if not os.path.isdir(dir):
log.info('Creating {}'.format(dir))
os.makedirs(dir)
else:
log.info('Clearing %s', dir)
cleanDir(dir)
def dictToTuples(inp):
return [(k, v) for k, v in inp.items()]
def handleIf(operators, pkg_cfg, var_replacements):
if len(operators):
for expr in operators:
(operator, args) = dictToTuples(expr)[0]
if isinstance(args, str):
args = args.split(' ')
args = [replace_vars(arg, var_replacements) for arg in args]
if operator == 'file-exists':
if not os.path.exists(args[0]):
return False
else:
log.warn('Invalid operator %s', operator)
return True
else:
return False
def handleChdir(operators, pkg_cfg, var_replacements):
if len(operators):
origpath = os.path.abspath(os.getcwd())
def jumpBack():
log.info('cd %s', origpath)
os.chdir(origpath)
for expr in operators:
(operator, args) = dictToTuples(expr)[0]
if isinstance(args, str):
args = args.split(' ')
args = [replace_vars(arg, var_replacements) for arg in args]
newdir = os.path.abspath(args[0])
if operator == 'dir':
if not os.path.isdir(newdir):
log.warn('Directory %s does not exist, cannot chdir.', newdir)
return (False, None)
else:
os.chdir(newdir)
log.info('cd %s', newdir)
return (True, jumpBack)
else:
log.warn('Invalid operator %s', operator)
log.info('cd %s', args[0])
return (True, jumpBack)
else:
return (False, None)
def RunCommandsIn(commandlist, pkg_cfg, var_replacements):
if len(commandlist) == 0:
return
with log:
for package_cmd in commandlist:
# Conditionals
if isinstance(package_cmd, dict):
result = None
postwork = None
if 'if' in package_cmd:
result = handleIf(package_cmd['if'], pkg_cfg, var_replacements)
if 'if-not' in package_cmd:
result = not handleIf(package_cmd['if-not'], pkg_cfg, var_replacements)
if 'chdir' in package_cmd:
result, origpath = handleChdir(package_cmd['chdir'], pkg_cfg, var_replacements)
if result is None:
continue
RunCommandsIn(package_cmd.get('then' if result else 'else', []), pkg_cfg, var_replacements)
if postwork is not None:
origpath()
continue
# Strings -> lists
if isinstance(package_cmd, str):
package_cmd = package_cmd.split(' ')
ccmd = [replace_vars(fragment, var_replacements) for fragment in package_cmd]
command = ccmd[0]
cmd(ccmd, echo=True, critical=True)
def aggregate(cfg, dir):
job_cfg = yaml.load(os.path.join(dir, 'package.yml'))
def CloneOrPull(id, uri, dir):
if not os.path.isdir(dir):
cmd(['git', 'clone', uri, dir], echo=True, show_output=True, critical=True)
else:
with os_utils.Chdir(dir):
cmd(['git', 'pull'], echo=True, show_output=True, critical=True)
with os_utils.Chdir(dir):
log.info('{} is now at commit {}.'.format(id, Git.GetCommit()))
if __name__ == '__main__':
import argparse
# logging.basicConfig(
# format='%(asctime)s [%(levelname)-8s]: %(message)s',
# datefmt='%m/%d/%Y %I:%M:%S %p',
# level=logging.INFO)
# # filename='logs/main.log',
# # filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# logging.getLogger('').addHandler(console)
# log = IndentLogger()
d_cfg = {
'env': {
'distcc': {
'enabled': False,
'hosts': {}
},
'ccache': {
'enabled': False
},
'cotire': {
'enabled': False
},
'make': {
'jobs': 5,
'flags': []
},
'packaging': {
'enabled': True,
'repo-deploy': True,
'maintainer': 'Rob Nelson <nexisentertainment@gmail.com>',
'packages': {
'debian/jessie': True,
'debian/wheezy': False
},
}
},
'bin': {
'make': 'make',
'pump': 'distcc-pump',
'ccache': 'ccache',
'asm': 'cc',
'cc': 'gcc-4.8',
'cxx': 'g++-4.8'
},
'paths': {
'source': './hhvm_src',
'install': '/tmp/hhvm-install',
'package': '/tmp/hhvm-package'
}
}
argp = argparse.ArgumentParser(prog='hhvm_build', description='Build HHVM')
argp.add_argument('distro', type=str, help='Linux Distribution (deb, etc)')
argp.add_argument('release', type=str, help='OS Release codename (precise, etc)')
argp.add_argument('version', type=str, help='HHVM Version')
argp.add_argument('hhvm_job', type=str, help='HHVM Jenkins workspace')
argp.add_argument('-c', '--config', type=str, default='config.yml', help='YAML file to read configuration from.')
argp.add_argument('--disable-ccache', action='store_true')
argp.add_argument('--disable-distcc', action='store_true')
argp.add_argument('--disable-cotire', action='store_true')
argp.add_argument('--package-only', action='store_true')
argp.add_argument('--disable-packaging', action='store_true')
argp.add_argument('--disable-repo-deploy', action='store_true')
argp.add_argument('--disable-git-clean', action='store_true')
argp.add_argument('--force-rebuild', action='store_true')
args = argp.parse_args()
cfg = Config(args.config, d_cfg)
if args.disable_ccache:
cfg['env']['ccache']['enable'] = False
if args.disable_distcc:
cfg['env']['distcc']['enable'] = False
if args.disable_cotire:
cfg['env']['cotire']['enable'] = False
if args.disable_packaging:
cfg['env']['packaging']['enable'] = False
if args.disable_repo_deploy:
cfg['env']['packaging']['repo-deploy'] = False
DISTRO_NAME = args.distro
DISTRO_RELEASE = args.release
HHVMBUILD_DIR = os.getcwd()
DISTRO_DIR = os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE)
HHVM_VERSION = args.version
SOURCE_DIR = os.path.abspath(args.hhvm_job)
if not os.path.isdir(DISTRO_DIR):
logging.fatal('Directory {0} doesn\'t exist.'.format(DISTRO_DIR))
sys.exit(1)
if SOURCE_DIR is None or not os.path.isdir(SOURCE_DIR):
SOURCE_DIR = os.path.abspath(cfg.get('paths.source'))
if not os.path.isdir(SOURCE_DIR):
log.info('Source code not found. SOURCE_DIR={}'.format(SOURCE_DIR))
sys.exit(1)
INSTALL_DIR = os.path.abspath(cfg.get('paths.install'))
mkdirOrClear(INSTALL_DIR)
PACKAGE_DIR = os.path.abspath(cfg.get('paths.package'))
mkdirOrClear(PACKAGE_DIR)
NIGHTLY = False
DEVONLY = False
DEBUG = False
version_chunks = HHVM_VERSION.split('-')
new_version_chunks = []
for i in range(len(version_chunks)):
chunk = version_chunks[i]
if i == 1 and chunk == 'nightly':
NIGHTLY = True
# new_version_chunks += [chunk]
continue
else:
if chunk == 'dev':
DEVONLY = True
DEBUG = True
continue
if chunk == 'dbg':
DEBUG = True
continue
new_version_chunks += [chunk]
HHVM_VERSION = '-'.join(new_version_chunks)
log.info('HHVM Version {} - Debug: {}, Dev: {}, Nightly: {}'.format(HHVM_VERSION, bool2yn(DEBUG), bool2yn(DEVONLY), bool2yn(NIGHTLY)))
env_ext = {
'CC': cfg.get('bin.cc', 'gcc-4.8'),
'CXX': cfg.get('bin.cxx', 'g++-4.8'),
'ASM': cfg.get('bin.asm', 'cc'),
'CMAKE_INCLUDE_PATH': tempfile.NamedTemporaryFile(delete=False).name,
'CMAKE_LIBRARY_PATH': "/usr/lib/hhvm/",
'HPHP_HOME': SOURCE_DIR,
'MYSQL_UNIX_SOCK_ADDR': '/var/run/mysqld/mysqld.sock',
}
ENV.merge(env_ext)
cmake = CMake()
for k, v in cfg.get('env.cmake.flags', {}).items():
cmake.setFlag(k, v)
MAKE_FLAGS = cfg.get('env.make.flags', [])
cmake.setFlag('CMAKE_BUILD_TYPE', 'Debug' if DEBUG else 'Release')
cmake.setFlag('CMAKE_INSTALL_PREFIX', '/usr')
cmake.setFlag('BOOST_LIBRARYDIR', BOOST_LIBRARYDIR)
cmake.setFlag('ENABLE_LD_GOLD', "Off")
configure_ccache(cfg, cmake)
configure_distcc(cfg, cmake)
configure_cotire(cfg, cmake)
job_flag = '-j' + str(cfg.get('env.make.jobs', 1))
MAKE_FLAGS += [job_flag]
#NIGHTLY_DATE = datetime.datetime.utcnow().strftime('%Y.%m.%d')
# 20160102
NIGHTLY_DATE = datetime.datetime.utcnow().strftime('%Y%m%d')
VERSION=''
iteration = int(os.environ.get('BUILD_NUMBER', '1'))
NIGHTLY_DATE += '{:04d}'.format(iteration)
# End format: YYYYMMDDBBBB - BB being build number
repo = GitRepository(SOURCE_DIR, None)
with Chdir(SOURCE_DIR) as sourcedir:
with log.info('Compile environment:'):
cmd(['uname', '-a'], echo=False)
cmd(['lsb_release', '-a'], echo=False)
cmd(['git', 'log', '-n', '1', '--pretty=oneline'], echo=False)
hhvm_bin = os.path.join(SOURCE_DIR, 'hphp/hhvm/hhvm')
rebuild = args.force_rebuild
if not rebuild and not os.path.isfile(hhvm_bin):
log.warn('hhvm binaries not found.')
rebuild = True
if rebuild and args.package_only:
log.error('Nothing to package, aborting.')
sys.exit(1)
if rebuild:
with log.info('Preparing to compile...'):
branch = ''
if NIGHTLY:
branch = 'master'
REG_VERSION_CHUNK = re.compile(r'# define (HHVM_VERSION_[A-Z]+) (.*)$')
def get_version_chunk(line):
m = REG_VERSION_CHUNK.match(line.strip())
return m.group(2)
version_file = ''
#with open('hphp/system/idl/constants.idl.json', 'r') as f:
# version_file = f.read()
major=0
minor=0
patch=0
suffix=''
with open('hphp/runtime/version.h','r') as f:
#ifndef HHVM_VERSION_OVERRIDE
# define HHVM_VERSION_MAJOR 3
# define HHVM_VERSION_MINOR 15
# define HHVM_VERSION_PATCH 0
# define HHVM_VERSION_SUFFIX "-dev"
#endif
for line in f:
if line.startswith('# define HHVM_VERSION_MAJOR'):
major=int(get_version_chunk(line))
if line.startswith('# define HHVM_VERSION_MINOR'):
minor=int(get_version_chunk(line))
if line.startswith('# define HHVM_VERSION_PATCH'):
patch=int(get_version_chunk(line))
if line.startswith('# define HHVM_VERSION_SUFFIX'):
suffix=get_version_chunk(line).strip('"')
HHVM_VERSION='{0}.{1}.{2}{3}+{4}'.format(major,minor,patch,suffix,NIGHTLY_DATE)
log.info('HHVM Version set: %s',HHVM_VERSION)
cmake.setFlag('HHVM_VERSION_OVERRIDE',HHVM_VERSION)
#with open('hphp/system/idl/constants.idl.json', 'w') as f:
# f.write(REG_VERSION.sub('\1+' + NIGHTLY_DATE, version_file))
else:
branch = 'HHVM-{}'.format(HHVM_VERSION)
repo.quiet = False
repo.CheckForUpdates(remote='origin', branch=branch, quiet=False)
repo.Pull(remote='origin', branch=branch, cleanup=not args.disable_git_clean)
repo.UpdateSubmodules()
distro_info = os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE, 'package.yml')
distro_cfg = YAMLConfig(distro_info, template_dir='/', variables={
'SOURCE_DIR': SOURCE_DIR,
'DISTRO_DIR': os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE),
'HHVMBUILD_DIR': HHVMBUILD_DIR
})
pbsteps = distro_cfg.get('prebuild', [])
if pbsteps is None:
pbsteps = []
if len(pbsteps) > 0:
log.info('Performing prebuild steps...')
origpath = os.path.abspath(os.getcwd())
RunCommandsIn(distro_cfg.get('prebuild', []), distro_cfg, {
'SOURCE_DIR': SOURCE_DIR,
'DISTRO_DIR': os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE),
'HHVMBUILD_DIR': HHVMBUILD_DIR
})
os.chdir(origpath)
if not cmake.run(cfg.get('bin.cmake', 'cmake')):
sys.exit(1)
if not cmd([cfg.get('bin.make', 'make')] + MAKE_FLAGS, critical=True, echo=True):
sys.exit(1)
if not os.path.isfile(hhvm_bin):
log.critical(hhvm_bin + " doesn't exist")
sys.exit(1)
if cfg.get('env.packaging.enabled', False):
stdout, stderr = cmd_output('dpkg-architecture -qDEB_BUILD_GNU_TYPE'.split(), critical=True, echo=False)
ARCH = (stdout + stderr).strip()
for build_type in ['main', 'dev']:
if build_type == 'dev':
DEVONLY = True
if DEBUG:
continue
pkgname = 'hhvm'
skeletondirname = 'skeleton'
suffix = ''
if DEVONLY:
pkgname += '-dev'
suffix = '-dev'
if NIGHTLY:
pkgname += '-nightly'
if DEBUG and not DEVONLY:
pkgname += '-dbg'
with log.info('Packaging {}...'.format(pkgname)):
d_pkg_cfg = {
'make-workspace': [],
'fpm': {
'output-type': 'deb'
}
}
skeletondirname += suffix
pkginfo_dir = os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE + suffix)
pkg_cfg = Config(os.path.join(pkginfo_dir, 'package.yml'), d_pkg_cfg, template_dir='/', variables={
'SOURCE_DIR': SOURCE_DIR,
'DISTRO_DIR': DISTRO_DIR,
'INSTALL_DIR': INSTALL_DIR,
'PACKAGE_DIR': PACKAGE_DIR,
'DISTRO_DIR': DISTRO_DIR,
'SKELETON_DIR': os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, skeletondirname),
'HHVMBUILD_DIR': HHVMBUILD_DIR,
'ARCH': ARCH
})
with Chdir(SOURCE_DIR) as sourcedir:
cmd([cfg.get('bin.make', 'make'), 'install', 'DESTDIR=' + INSTALL_DIR], critical=True)
if NIGHTLY:
version = NIGHTLY_DATE + '~' + ('debug' if DEBUG else 'release')
if DEBUG:
conflicts = replaces = ['hhvm', 'hhvm-nightly', 'hhvm-dbg']
else:
conflicts = replaces = ['hhvm', 'hhvm-dbg']
else:
if DEBUG:
conflicts = replaces = ['hhvm']
version = HHVM_VERSION + '~' + ('debug' if DEBUG else 'release')
if len(pkg_cfg.get('make-workspace', [])) > 0:
log.info('Prepping workspace{} for packaging...'.format(suffix))
RunCommandsIn(pkg_cfg.get('make-workspace', []), pkg_cfg, {})
package = ''
pkgVersion = ''
with Chdir(PACKAGE_DIR):
fpm = FPM()
fpm.input_type = 'dir'
fpm.output_type = 'deb'
with log.info('Loading ' + DISTRO_DIR + '/DEBIAN/control...'):
fpm.LoadControl(DISTRO_DIR + '/DEBIAN/control')
skeledir = os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, skeletondirname)
with log.info('Loading {}...'.format(skeledir)):
fpm.LoadDebianDirectory(os.path.join(skeledir, "DEBIAN"))
with log.info('Figuring out version number...'):
while True:
pkgVersion = version
if iteration > 0:
pkgVersion += '-{}'.format(iteration)
package = os.path.join(HHVMBUILD_DIR, '{name}_{version}.{arch}.deb'.format(name=pkgname, version=pkgVersion, arch=fpm.architecture))
if not os.path.isfile(package):
break
log.warn('%s exists, increasing iterator and trying again.', package)
iterator += 1
log.info('package=%s', package)
if build_type != 'dev':
with log.info('Determining package dependencies...'):
shlib_data = os_utils.GetDpkgShlibs([PACKAGE_DIR + '/usr/bin/hhvm'])
fpm.dependencies = shlib_data['Depends']
log.info('fpm.dependencies = ' + repr(fpm.dependencies))
with log.info('Running FPM...'):
fpm.version = str(version)
fpm.maintainer = cfg.get('env.packaging.maintainer', 'NOT SET <lol@idk.local>')
fpm.name = pkgname
# fpm.provides = ['hhvm'+suffix]
fpm.conflicts = conflicts
if build_type != 'dev':
fpm.configs += ['etc/']
fpm.iteration = iteration
fpm.replaces = replaces
fpm.inputs = ['.']
fpm.workdir = PACKAGE_DIR
fpm.Build(str(package))
if cfg.get('env.packaging.repo-deploy', True):
with log.info('Adding package to repo...'):
cmd(['freight-add', package, 'apt/' + DISTRO_RELEASE], critical=True)
with log.info('Generating repository cache...'):
cmd(['freight-cache', '-p', '~/.gpass'])
with log.info('Serializing configuration for extension use...'):
ext_cfg = {
'hhvm_version': version,
'cmake_flags': cmake.flags,
'make_flags': MAKE_FLAGS,
'env_ext': env_ext,
}
with open('ext.cfg', 'w') as f:
yaml.dump(ext_cfg, f)
# extcfg = cfg.get('paths.exts',{})
# for name, extpath in extcfg:
# buildExt(name, extpath)
|
from __future__ import unicode_literals
from __future__ import absolute_import
import hashlib
import os
import random
import re
from string import Template
import sys
import threading
import time
from time import strftime, gmtime
import traceback
from io import open
import six
import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error
from six.moves import range
import scalyr_agent.monitor_utils.annotation_config as annotation_config
from scalyr_agent.monitor_utils.annotation_config import BadAnnotationConfig
from scalyr_agent.monitor_utils.blocking_rate_limiter import BlockingRateLimiter
import scalyr_agent.third_party.requests as requests
from scalyr_agent.util import StoppableThread
from scalyr_agent.json_lib import JsonObject
import scalyr_agent.scalyr_logging as scalyr_logging
import scalyr_agent.util as util
from scalyr_agent.compat import os_environ_unicode
global_log = scalyr_logging.getLogger(__name__)
# A regex for splitting a container id and runtime
_CID_RE = re.compile("^(.+)://(.+)$")
# endpoints used by the agent for querying the k8s api. Having this mapping allows
# us to avoid special casing the logic for each different object type. We can just
# look up the appropriate endpoint in this dict and query objects however we need.
#
# The dict is keyed by object kind, and or each object kind, there are 3 endpoints:
# single, list and list all.
#
# `single` is for querying a single object of a specific type
# `list` is for querying all objects of a given type in a specific namespace
# `list-all` is for querying all objects of a given type in the entire cluster
#
# the `single` and `list` endpoints are Templates that require the caller to substitute
# in the appropriate values for ${namespace} and ${name}
_OBJECT_ENDPOINTS = {
"CronJob": {
"single": Template(
"/apis/batch/v1beta1/namespaces/${namespace}/cronjobs/${name}"
),
"list": Template("/apis/batch/v1beta1/namespaces/${namespace}/cronjobs"),
"list-all": "/apis/batch/v1beta1/cronjobs",
},
"DaemonSet": {
"single": Template("/apis/apps/v1/namespaces/${namespace}/daemonsets/${name}"),
"list": Template("/apis/apps/v1/namespaces/${namespace}/daemonsets"),
"list-all": "/apis/apps/v1/daemonsets",
},
"Deployment": {
"single": Template("/apis/apps/v1/namespaces/${namespace}/deployments/${name}"),
"list": Template("/apis/apps/v1/namespaces/${namespace}/deployments"),
"list-all": "/apis/apps/v1/deployments",
},
"Job": {
"single": Template("/apis/batch/v1/namespaces/${namespace}/jobs/${name}"),
"list": Template("/apis/batch/v1/namespaces/${namespace}/jobs"),
"list-all": "/apis/batch/v1/jobs",
},
"Pod": {
"single": Template("/api/v1/namespaces/${namespace}/pods/${name}"),
"list": Template("/api/v1/namespaces/${namespace}/pods"),
"list-all": "/api/v1/pods",
},
"ReplicaSet": {
"single": Template("/apis/apps/v1/namespaces/${namespace}/replicasets/${name}"),
"list": Template("/apis/apps/v1/namespaces/${namespace}/replicasets"),
"list-all": "/apis/apps/v1/replicasets",
},
"ReplicationController": {
"single": Template(
"/api/v1/namespaces/${namespace}/replicationcontrollers/${name}"
),
"list": Template("/api/v1/namespaces/${namespace}/replicationcontrollers"),
"list-all": "/api/v1/replicationcontrollers",
},
"StatefulSet": {
"single": Template(
"/apis/apps/v1/namespaces/${namespace}/statefulsets/${name}"
),
"list": Template("/apis/apps/v1/namespaces/${namespace}/statefulsets"),
"list-all": "/apis/apps/v1/statefulsets",
},
}
# Template for an older kubelet endpoint that we may want to fall back to if the new one is unavailable due to an
# older kubernetes version
FALLBACK_KUBELET_URL_TEMPLATE = Template("http://${host_ip}:10255")
def cache(global_config):
"""
Returns the global k8s cache, configured using the options in `config`
@param config: The configuration
@type config: A Scalyr Configuration object
"""
# split comma delimited string of namespaces to ignore in to a list of strings
namespaces_to_ignore = []
for x in global_config.k8s_ignore_namespaces:
namespaces_to_ignore.append(x.strip())
cache_config = _CacheConfig(
api_url=global_config.k8s_api_url,
verify_api_queries=global_config.k8s_verify_api_queries,
cache_expiry_secs=global_config.k8s_cache_expiry_secs,
cache_expiry_fuzz_secs=global_config.k8s_cache_expiry_fuzz_secs,
cache_start_fuzz_secs=global_config.k8s_cache_start_fuzz_secs,
cache_purge_secs=global_config.k8s_cache_purge_secs,
query_timeout=global_config.k8s_cache_query_timeout_secs,
global_config=global_config,
)
# update the config and return current cache
_k8s_cache.update_config(cache_config)
return _k8s_cache
def terminate_agent_process(reason):
"""Terminate this agent process, causing the pod running the agent to restart the agent container.
:param reason: The termination reason which will be written in the K8s termination log and crash report.
:type reason: six.text_type
"""
try:
with open("/dev/termination-log", "w") as fp:
fp.write(reason)
finally:
sys.exit(1)
class K8sApiException(Exception):
"""A wrapper around Exception that makes it easier to catch k8s specific
exceptions
"""
def __init__(self, message, status_code=0):
super(K8sApiException, self).__init__(message)
self.status_code = status_code
class K8sApiTemporaryError(K8sApiException):
"""The base class for all temporary errors where a retry may result in success (timeouts, too many requests,
etc) returned when issuing requests to the K8s API server
"""
def __init__(self, message, status_code=0):
super(K8sApiTemporaryError, self).__init__(message, status_code=status_code)
class K8sApiPermanentError(K8sApiException):
"""The base class for all permanent errors where a retry will always fail until human action is taken
(authorization errors, object not found) returned when issuing requests to the K8s API server
"""
def __init__(self, message, status_code=0):
super(K8sApiPermanentError, self).__init__(message, status_code=status_code)
class K8sApiAuthorizationException(K8sApiPermanentError):
"""A wrapper around Exception that makes it easier to catch k8s authorization
exceptions
"""
def __init__(self, path, status_code=0):
super(K8sApiAuthorizationException, self).__init__(
"You don't have permission to access %s. Please ensure you have correctly configured the RBAC permissions for the scalyr-agent's service account"
% path,
status_code=status_code,
)
# K8sApiNotFoundException needs to be a TemporaryError because there are cases
# when a pod is starting up that querying the pods endpoint will return 404 Not Found
# but then the same query a few seconds later (once the pod is up and running) will return
# 200 - Ok. Having it derive from PermanentError would put it on a blacklist, when all we
# might want is to back off for a few seconds and try again
class K8sApiNotFoundException(K8sApiTemporaryError):
"""
A wrapper around Exception that makes it easier to catch not found errors when querying the k8s api
"""
def __init__(self, path, status_code=0):
super(K8sApiNotFoundException, self).__init__(
"The resource at location `%s` was not found" % path,
status_code=status_code,
)
class KubeletApiException(Exception):
"""A wrapper around Exception that makes it easier to catch k8s specific
exceptions
"""
pass
class QualifiedName(object):
"""
Represents a fully qualified name for a Kubernetes object using both its name and namespace.
"""
__slots__ = ("namespace", "name")
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __eq__(self, other):
return self.namespace == other.namespace and self.name == other.name
def __ne__(self, other):
return not self.__eq__(other)
def is_valid(self):
return self.namespace is not None and self.name is not None
class PodInfo(object):
"""
A collection class that stores label and other information about a kubernetes pod
"""
def __init__(
self,
name="",
namespace="",
uid="",
node_name="",
labels={},
container_names=[],
annotations={},
controller=None,
):
self.name = name
self.namespace = namespace
self.uid = uid
self.node_name = node_name
self.labels = labels
self.container_names = container_names
self.annotations = annotations
self.controller = controller # controller can't change for the life of the object so we don't include it in hash
# generate a hash we can use to compare whether or not
# any of the pod info has changed
md5 = hashlib.md5()
md5.update(name)
md5.update(namespace)
md5.update(uid)
md5.update(node_name)
# flatten the labels dict in to a single string because update
# expects a string arg. To avoid cases where the 'str' of labels is
# just the object id, we explicitly create a flattened string of
# key/value pairs
flattened = []
for k, v in six.iteritems(labels):
flattened.append(k)
flattened.append(v)
md5.update("".join(flattened))
# flatten the container names
# see previous comment for why flattening is necessary
md5.update("".join(container_names))
# flatten the annotations dict in to a single string
# see previous comment for why flattening is necessary
flattened = []
for k, v in six.iteritems(annotations):
flattened.append(k)
flattened.append(six.text_type(v))
md5.update("".join(flattened))
self.digest = md5.digest()
def exclude_pod(self, container_name=None, default=False):
"""
Returns whether or not this pod should be excluded based
on include/exclude annotations. If an annotation 'exclude' exists
then this will be returned. If an annotation 'include' exists, then
the boolean opposite of 'include' will be returned. 'include' will
always override 'exclude' if it exists.
param: container_name - if specified, and container_name exists in
the pod annotations, then the container specific annotations will
also be checked. These will supercede the pod level include/exclude
annotations
param: default - Boolean the default value if no annotations are found
return Boolean - whether or not to exclude this pod
"""
def exclude_status(annotations, default):
exclude = util.value_to_bool(annotations.get("exclude", default))
# include will always override value of exclude if both exist
exclude = not util.value_to_bool(annotations.get("include", not exclude))
return exclude
result = exclude_status(self.annotations, default)
if container_name and container_name in self.annotations:
result = exclude_status(self.annotations[container_name], result)
return result
class Controller(object):
"""
General class for all cached Controller objects
"""
def __init__(
self,
name="",
namespace="",
kind="",
parent_name=None,
parent_kind=None,
labels={},
):
self.name = name
self.namespace = namespace
self.kind = kind
self.access_time = None
self.parent_name = parent_name
self.parent_kind = parent_kind
flat_labels = []
for key, value in six.iteritems(labels):
flat_labels.append("%s=%s" % (key, value))
self.flat_labels = ",".join(flat_labels)
class ApiQueryOptions(object):
"""Options to use when querying the K8s Api server.
"""
def __init__(self, max_retries=3, return_temp_errors=True, rate_limiter=None):
"""
@param max_retries: The number of times we will retry a query if it receives a temporary error before failing.
@param return_temp_errors: If true, all non-known errors will automatically be categorized as temporary errors.
@param rate_limiter: Rate limiter for api calls
"""
self.max_retries = max_retries
self.return_temp_errors = return_temp_errors
self.rate_limiter = rate_limiter
def __repr__(self):
return (
"ApiQueryOptions\n\tmax_retries=%s\n\treturn_temp_errors=%s\n\trate_limiter=%s\n"
% (self.max_retries, self.return_temp_errors, self.rate_limiter)
)
class _K8sCache(object):
"""
A cached store of objects from a k8s api query
This is a private class to this module. See KubernetesCache which instantiates
instances of _K8sCache for querying different k8s API objects.
This abstraction is thread-safe-ish, assuming objects returned
from querying the cache are never written to.
"""
def __init__(self, processor, object_type):
"""
Initialises a Kubernetes Cache
@param processor: a _K8sProcessor object for querying/processing the k8s api
@param object_type: a string containing a textual name of the objects being cached, for use in log messages
"""
# protects self._objects and self._objects_expired
self._lock = threading.Lock()
# dict of object dicts. The outer dict is hashed by namespace,
# and the inner dict is hashed by object name
self._objects = {}
# Identical to self._objects but contains optional expired booleans for corresponding object
# New object won't have an entry. Only older objects that have been "soft purged" will be marked
# with a boolean (True).
# Note:
# Expirations should ideally be stored in the _objects dict itself alongside objects. However,
# the long-term direction for this feature is uncertain and so this is a temporary implementation
# needed to support the notion of a "soft purge".
self._objects_expired = {}
self._processor = processor
self._object_type = object_type
def shallow_copy(self):
"""Returns a shallow copy of all the cached objects dict"""
result = {}
self._lock.acquire()
try:
for k, v in six.iteritems(self._objects):
result[k] = v
finally:
self._lock.release()
return result
def __get_stale_objects(self, access_time):
"""Get all stale objects. Caller should first obtain lock on self._objects"""
stale = []
for namespace, objs in six.iteritems(self._objects):
for obj_name, obj in six.iteritems(objs):
if hasattr(obj, "access_time"):
if obj.access_time is None or obj.access_time < access_time:
stale.append((namespace, obj_name))
return stale
def mark_as_expired(self, access_time):
"""Mark all stale cache objects as expired
@param access_time: Any objects last accessed before access_time will be purged
"""
self._lock.acquire()
try:
stale = self.__get_stale_objects(access_time)
for (namespace, obj_name) in stale:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"Mark object %s/%s as expired in cache" % (namespace, obj_name),
)
expired_set = self._objects_expired.setdefault(namespace, {})
expired_set.setdefault(obj_name, True)
finally:
self._lock.release()
def purge_unused(self, access_time):
"""Removes any items from the store who haven't been accessed since `access_time`
@param access_time: Any objects last accessed before access_time will be purged
"""
self._lock.acquire()
try:
stale = self.__get_stale_objects(access_time)
for (namespace, obj_name) in stale:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"Removing object %s/%s from cache" % (namespace, obj_name),
)
self._objects[namespace].pop(obj_name, None)
self._objects_expired.get(namespace, {}).pop(obj_name, None)
finally:
self._lock.release()
def _update_object(
self, k8s, kind, namespace, name, current_time, query_options=None
):
"""update a single object, returns the object if found, otherwise return None
This method will always raise a K8SApiException upon k8s api response failure. It is the responsibility of the
caller to handle exceptions.
"""
result = None
# query k8s api and process objects
obj = k8s.query_object(kind, namespace, name, query_options=query_options)
result = self._processor.process_object(k8s, obj, query_options=query_options)
self._add_to_cache(result)
return result
def _add_to_cache(self, obj):
"""
Adds the object `obj` to the cache.
"""
# update our cache if we have an obj
if obj:
global_log.log(
scalyr_logging.DEBUG_LEVEL_2,
"Processing single %s: %s/%s"
% (self._object_type, obj.namespace, obj.name),
)
self._lock.acquire()
try:
# update the object
objects = self._objects.setdefault(obj.namespace, {})
objects[obj.name] = obj
# remove expired flag
expired_dict = self._objects_expired.setdefault(obj.namespace, {})
expired_dict.pop(obj.name, None)
finally:
self._lock.release()
def _lookup_object(
self, namespace, name, current_time, allow_expired=True, query_options=None
):
""" Look to see if the object specified by the namespace and name exists within the cached data.
Note: current_time should be provided (otherwise, access_time-based revalidation of cache won't work correctly,
for example, manifesting as unnecessary re-queries of controller metadata)
Return the object info, or None if not found
@param namespace: The object's namespace
@param name: The object's name
@param current_time last access time for the object to this value.
@param allow_expired: If true, return the object if it exists in the cache even if expired.
If false, return None if the object exists but is expired.
@type namespace: six.text_type
@type name: six.text_type
@type current_time: epoch seconds
@type allow_expired: bool
"""
result = None
self._lock.acquire()
try:
# Optionally check if the object has been marked as expired. If so, return None.
if not allow_expired:
expired = self._objects_expired.setdefault(namespace, {}).get(
name, False
)
if expired:
return None
objects = self._objects.get(namespace, {})
result = objects.get(name, None)
# update access time
if result is not None and current_time is not None:
result.access_time = current_time
finally:
self._lock.release()
return result
def is_cached(self, namespace, name, allow_expired):
"""Returns true if the specified object is in the cache and (optionally) not expired.
@param namespace: The object's namespace
@param name: The object's name
@param allow_expired: If True, an object is considered present in cache even if it is expired.
@type namespace: six.text_type
@type name: six.text_type
@type allow_expired: bool
@return: True if the object is cached. If check_expiration is True and an expiration
time exists for the object, then return True only if not expired
@rtype: bool
"""
# TODO: Look at passing down a consistent time from layers above
return (
self._lookup_object(
namespace, name, time.time(), allow_expired=allow_expired
)
is not None
)
def lookup(
self,
k8s,
current_time,
namespace,
name,
kind=None,
allow_expired=True,
query_options=None,
ignore_k8s_api_exception=False,
):
"""Returns info for the object specified by namespace and name or None if no object is found in the cache.
Querying the information is thread-safe, but the returned object should not be written to.
This method will propagate upwards K8SApiExceptions generated by k8s api response failure.
It is the responsibility of the caller to handle exceptions.
@param allow_expired: If True, an object is considered present in cache even if it is expired.
@type allow_expired: bool
"""
if kind is None:
kind = self._object_type
# see if the object exists in the cache and return it if so
result = self._lookup_object(
namespace,
name,
current_time,
allow_expired=allow_expired,
query_options=None,
)
if result:
global_log.log(
scalyr_logging.DEBUG_LEVEL_2,
"cache hit for %s %s/%s" % (kind, namespace, name),
)
return result
# we have a cache miss so query the object individually
global_log.log(
scalyr_logging.DEBUG_LEVEL_2,
"cache miss for %s %s/%s" % (kind, namespace, name),
)
result = None
try:
result = self._update_object(
k8s, kind, namespace, name, current_time, query_options=query_options
)
except K8sApiException:
if ignore_k8s_api_exception:
pass
else:
raise
return result
class _K8sProcessor(object):
"""
An abstract interface used by _K8sCache for querying a specific type of
object from the k8s api, and generating python objects from the queried result JSON.
"""
def _get_managing_controller(self, items):
"""
Processes a list of items, searching to see if one of them
is a 'managing controller', which is determined by the 'controller' field
@param items: an array containing 'ownerReferences' metadata for an object
returned from the k8s api
@return: A dict containing the managing controller of type `kind` or None if no such controller exists
"""
for i in items:
controller = i.get("controller", False)
if controller:
return i
return None
def process_object(self, k8s, obj, query_options=None):
"""
Creates a python object based of a dict
@param k8s: a KubernetesApi object
@param obj: A JSON dict returned as a response to querying
the k8s API for a specific object type.
@return a python object relevant to the
"""
raise NotImplementedError("process_object not implemented for _K8sProcessor")
class PodProcessor(_K8sProcessor):
def __init__(self, controllers):
super(PodProcessor, self).__init__()
self._controllers = controllers
def _get_controller_from_owners(self, k8s, owners, namespace, query_options=None):
"""
Processes a list of owner references returned from a Pod's metadata to see
if it is eventually owned by a Controller, and if so, returns the Controller object
@return Controller - a Controller object
"""
controller = None
# check if we are owned by another controller
owner = self._get_managing_controller(owners)
if owner is None:
return None
# make sure owner has a name field and a kind field
name = owner.get("name", None)
if name is None:
return None
kind = owner.get("kind", None)
if kind is None:
return None
# walk the parent until we get to the root controller
# Note: Parent controllers will always be in the same namespace as the child
current_time = time.time()
controller = self._controllers.lookup(
k8s,
current_time,
namespace,
name,
kind=kind,
query_options=query_options,
ignore_k8s_api_exception=True,
)
while controller:
if controller.parent_name is None:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"controller %s has no parent name" % controller.name,
)
break
if controller.parent_kind is None:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"controller %s has no parent kind" % controller.name,
)
break
# get the parent controller
parent_controller = self._controllers.lookup(
k8s,
current_time,
namespace,
controller.parent_name,
kind=controller.parent_kind,
query_options=query_options,
)
# if the parent controller doesn't exist, assume the current controller
# is the root controller
if parent_controller is None:
break
# walk up the chain
controller = parent_controller
return controller
def process_object(self, k8s, obj, query_options=None):
""" Generate a PodInfo object from a JSON object
@param k8s: a KubernetesApi object
@param pod: The JSON object returned as a response to querying
a specific pod from the k8s API
@return A PodInfo object
"""
result = {}
metadata = obj.get("metadata", {})
spec = obj.get("spec", {})
labels = metadata.get("labels", {})
annotations = metadata.get("annotations", {})
owners = metadata.get("ownerReferences", [])
pod_name = metadata.get("name", "")
namespace = metadata.get("namespace", "")
controller = self._get_controller_from_owners(
k8s, owners, namespace, query_options=query_options
)
container_names = []
for container in spec.get("containers", []):
container_names.append(container.get("name", "invalid-container-name"))
try:
annotations = annotation_config.process_annotations(annotations)
except BadAnnotationConfig as e:
global_log.warning(
"Bad Annotation config for %s/%s. All annotations ignored. %s"
% (namespace, pod_name, six.text_type(e)),
limit_once_per_x_secs=300,
limit_key="bad-annotation-config-%s"
% metadata.get("uid", "invalid-uid"),
)
annotations = JsonObject()
global_log.log(
scalyr_logging.DEBUG_LEVEL_2,
"Annotations: %s" % (six.text_type(annotations)),
)
# create the PodInfo
result = PodInfo(
name=pod_name,
namespace=namespace,
uid=metadata.get("uid", ""),
node_name=spec.get("nodeName", ""),
labels=labels,
container_names=container_names,
annotations=annotations,
controller=controller,
)
return result
class ControllerProcessor(_K8sProcessor):
def process_object(self, k8s, obj, query_options=None):
""" Generate a Controller object from a JSON object
@param k8s: a KubernetesApi object
@param obj: The JSON object returned as a response to querying
a specific controller from the k8s API
@return A Controller object
"""
metadata = obj.get("metadata", {})
kind = obj.get("kind", "")
owners = metadata.get("ownerReferences", [])
namespace = metadata.get("namespace", "")
name = metadata.get("name", "")
labels = metadata.get("labels", {})
parent_name = None
parent_kind = None
parent = self._get_managing_controller(owners)
if parent is not None:
parent_name = parent.get("name", None)
parent_kind = parent.get("kind", None)
return Controller(name, namespace, kind, parent_name, parent_kind, labels)
class _CacheConfig(object):
"""
Internal configuration options for the Kubernetes cache
"""
def __init__(
self,
api_url="https://kubernetes.default",
verify_api_queries=True,
cache_expiry_secs=30,
cache_purge_secs=300,
cache_expiry_fuzz_secs=0,
cache_start_fuzz_secs=0,
query_timeout=20,
global_config=None,
):
"""
@param api_url: the url for querying the k8s api
@param verify_api_queries: whether to verify queries to the k8s api
@param cache_expiry_secs: the number of secs to wait before updating the cache
@param cache_expiry_fuzz_secs: if greater than zero, the number of seconds to fuzz the expiration time to avoid query stampede
@param cache_start_fuzz_secs: if greater than zero, the number of seconds to fuzz the start time to avoid query stampede
@param cache_purge_secs: the number of seconds to wait before purging old controllers from the cache
@param query_timeout: The number of seconds to wait before a query to the API server times out
@param global_config: Global configuration object
@type api_url: str
@type verify_api_queries: bool
@type cache_expiry_secs: int or float
@type cache_expiry_fuzz_secs: int or float
@type cache_start_fuzz_secs: int or float
@type cache_purge_secs: int or float
@type query_timeout: int
@type global_config: Configurtion
"""
# NOTE: current implementations of __eq__ expects that fields set in contructor are only true state
# fields that affect equality. If this is ever changed, be sure to modify __eq__ accordingly
self.api_url = api_url
self.verify_api_queries = verify_api_queries
self.cache_expiry_secs = cache_expiry_secs
self.cache_expiry_fuzz_secs = cache_expiry_fuzz_secs
self.cache_start_fuzz_secs = cache_start_fuzz_secs
self.cache_purge_secs = cache_purge_secs
self.query_timeout = query_timeout
self.global_config = global_config
def __eq__(self, other):
"""Equivalence method for _CacheConfig objects so == testing works """
for key, val in self.__dict__.items():
if val != getattr(other, key):
return False
return True
def __ne__(self, other):
"""Non-Equivalence method for _CacheConfig objects because Python 2 doesn't
automatically generate != if == is defined
"""
# return result based on negation of `==` rather than negation of `__eq__`
return not (self == other)
def __repr__(self):
s = ""
for key, val in self.__dict__.items():
s += "\n\t%s: %s" % (key, val)
return s + "\n"
def need_new_k8s_object(self, new_config):
"""
Determines if a new KubernetesApi object needs to created for the cache based on the new config
@param new_config: The new config options
@type new_config: _CacheConfig
@return: True if a new KubernetesApi object should be created based on the differences between the
current and the new config. False otherwise.
"""
relevant_fields = ["api_url", "verify_api_queries", "query_timeout"]
relevant_global_config_fields = [
"agent_log_path",
"k8s_log_api_responses",
"k8s_log_api_exclude_200s",
"k8s_log_api_min_response_len",
"k8s_log_api_min_latency",
"k8s_log_api_ratelimit_interval",
]
# Verify the relevant CacheConfig fields are equal
for field in relevant_fields:
if getattr(self, field) != getattr(new_config, field):
return True
# If one of the sub global-config is null and the other isn't, return True
if bool(self.global_config) ^ bool(new_config.global_config):
return True
# If both sub global-configs are present, verify their relevant fields are equal
if self.global_config and new_config:
for field in relevant_global_config_fields:
if getattr(self.global_config, field) != getattr(
new_config.global_config, field
):
return True
return False
class _CacheConfigState(object):
"""
Class holding cache config related state
"""
class LocalState(object):
"""
Helper class containing copies of state information so that it can be used on
separate threads without worry of being changed by another thread.
"""
def __init__(self, state):
"""
Create a copy of the relevant parts of `state`
The caller should lock `state` before calling this method
@param state: the cache state
@type state: _CacheConfigState
"""
self.k8s = state.k8s
self.cache_expiry_secs = state.cache_config.cache_expiry_secs
self.cache_purge_secs = state.cache_config.cache_purge_secs
self.cache_expiry_fuzz_secs = state.cache_config.cache_expiry_fuzz_secs
self.cache_start_fuzz_secs = state.cache_config.cache_start_fuzz_secs
def __init__(self, cache_config, global_config):
"""Set default values"""
self._lock = threading.Lock()
self.k8s = None
self.cache_config = _CacheConfig(api_url="", global_config=global_config)
self._pending_config = None
self.configure(cache_config)
def copy_state(self):
"""
Get a copy of the relevant cache state in a thread-safe manner
@return: a copy of various state information, useful for the main processing thread
@rtype: LocalState
"""
self._lock.acquire()
try:
return self.LocalState(self)
finally:
self._lock.release()
def configure(self, new_cache_config):
"""
Configures the state based on any changes in the configuration.
Whenever a new configuration is detected, a new instance of KubernetesApi will be created.
The KubernetesApi however, will reference a named BlockingRateLimiter since we need to share
BlockingRateLimiters across monitors.
@param new_cache_config: the new configuration
@param global_config: global configuration object
@type new_cache_config: _CacheConfig
@type global_config: Configuration object
"""
# get old state values
old_state = self.copy_state()
need_new_k8s = False
self._lock.acquire()
try:
if self.cache_config == new_cache_config:
return
self._pending_config = new_cache_config
need_new_k8s = (
old_state.k8s is None
or self.cache_config.need_new_k8s_object(new_cache_config)
)
finally:
self._lock.release()
# create a new k8s api object if we need one
k8s = old_state.k8s
if need_new_k8s:
k8s = KubernetesApi.create_instance(
new_cache_config.global_config,
k8s_api_url=new_cache_config.api_url,
query_timeout=new_cache_config.query_timeout,
verify_api_queries=new_cache_config.verify_api_queries,
)
# update with new values
self._lock.acquire()
try:
# if new_config is not self._pending_config then it means a newer config
# came through on another thread before we finished this call and therefore
# we should avoid updating because we only want the most recent update to succeed.
# use 'is' rather than == because we want to see if they are the same object
# not if the objects are semantically identical
if new_cache_config is self._pending_config:
self.k8s = k8s
self.k8s.query_timeout = new_cache_config.query_timeout
self.cache_config = new_cache_config
self._pending_config = None
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"Got new config %s",
six.text_type(self.cache_config),
)
finally:
self._lock.release()
class AgentPodNotReadyException(Exception):
"""Raised when the agent pod is not fully ready according to the K8s API server."""
pass
class KubernetesCache(object):
def __init__(
self,
api_url="https://kubernetes.default",
verify_api_queries=True,
cache_expiry_secs=30,
cache_expiry_fuzz_secs=0,
cache_start_fuzz_secs=0,
cache_purge_secs=300,
start_caching=True,
global_config=None,
):
self._lock = threading.Lock()
new_cache_config = _CacheConfig(
api_url=api_url,
verify_api_queries=verify_api_queries,
cache_expiry_secs=cache_expiry_secs,
cache_expiry_fuzz_secs=cache_expiry_fuzz_secs,
cache_start_fuzz_secs=cache_start_fuzz_secs,
cache_purge_secs=cache_purge_secs,
global_config=global_config,
)
# set the initial state
self._state = _CacheConfigState(new_cache_config, global_config)
# create the controller cache
self._controller_processor = ControllerProcessor()
self._controllers = _K8sCache(self._controller_processor, "<controller>")
# create the pod cache
self._pod_processor = PodProcessor(self._controllers)
self._pods_cache = _K8sCache(self._pod_processor, "Pod")
self._cluster_name = None
self._api_server_version = None
# The last time (in seconds since epoch) we updated the K8s version number via a query
self._last_api_server_version_update = 0
self._container_runtime = None
self._initialized = False
self._last_initialization_error = "Initialization not started"
self._thread = None
self._rate_limiter = None
if start_caching:
self.start()
def stop(self):
"""Stops the cache, specifically stopping the background thread that refreshes the cache"""
self._thread.stop()
def start(self):
"""
Starts the background thread that reads from the k8s cache
"""
if self._thread is None:
self._thread = StoppableThread(target=self.update_cache, name="K8S Cache")
self._thread.start()
def local_state(self):
"""
Returns a local copy of the current state
"""
return self._state.copy_state()
def update_config(self, new_cache_config):
"""
Updates the cache config
"""
self._state.configure(new_cache_config)
self._lock.acquire()
try:
if self._thread is None:
self.start()
finally:
self._lock.release()
def is_initialized(self):
"""Returns whether or not the k8s cache has been initialized with the full pod list"""
result = False
self._lock.acquire()
try:
result = self._initialized
finally:
self._lock.release()
return result
def last_initialization_error(self):
"""Returns the last error experienced while initializing the cache.
Returns None if it is initialized."""
self._lock.acquire()
try:
if self._initialized:
return None
else:
return self._last_initialization_error
finally:
self._lock.release()
def _update_initialization_error(self, component, message):
"""Updates the last initialization error message experienced.
:param component: The component of the cache being initialized when the error occurred
:param message: The error message
:type component: six.text_type
:type message: six.text_type
"""
self._lock.acquire()
try:
self._last_initialization_error = (
'Unable to initialize %s in K8s cache due to "%s"'
% (component, message)
)
finally:
self._lock.release()
def _update_cluster_name(self, k8s):
"""Updates the cluster name"""
cluster_name = k8s.get_cluster_name()
self._lock.acquire()
try:
self._cluster_name = cluster_name
finally:
self._lock.release()
def _update_api_server_version_if_necessary(self, k8s, current_time=None):
"""Update the API server version if it has not been successfully update in the last hour.
The version number is determined by querying the K8s API server."""
if current_time is None:
current_time = time.time()
# Check if we have the version set and what time we set it.
self._lock.acquire()
try:
is_version_set = self._api_server_version is not None
last_check_time = self._last_api_server_version_update
finally:
self._lock.release()
# We only update if we haven't updated it in the last hour.
if not is_version_set or current_time - last_check_time > 3600:
# Query the API server to get version.
gitver = k8s.get_api_server_version()
self._lock.acquire()
try:
self._api_server_version = gitver
self._last_api_server_version_update = current_time
finally:
self._lock.release()
def _get_runtime(self, k8s):
pod_name = k8s.get_pod_name()
pod = k8s.query_pod(k8s.namespace, pod_name)
if pod is None:
global_log.warning(
"Coud not determine K8s CRI because could not find agent pod: %s"
% pod_name,
limit_once_per_x_secs=300,
limit_key="k8s_cri_no_pod",
)
return None
status = pod.get("status", {})
containers = status.get("containerStatuses", [])
for container in containers:
name = container.get("name")
if name and name == "scalyr-agent":
# If the agent container is not ready (according the API server) we cannot get the containerID
# and therefore cannot determine the container runtime. We need to wait a little bit for the
# API server to catch up. We raise this exception which triggers the right things.
if not container.get("ready", False):
raise AgentPodNotReadyException()
containerId = container.get("containerID", "")
m = _CID_RE.match(containerId)
if m:
return m.group(1)
else:
global_log.warning(
"Coud not determine K8s CRI because agent container id did not match: %s"
% containerId,
limit_once_per_x_secs=300,
limit_key="k8s_cri_unmatched_container_id",
)
return None
global_log.warning(
"Coud not determine K8s CRI because could not find agent container in pod.",
limit_once_per_x_secs=300,
limit_key="k8s_cri_no_agent_container",
)
return None
def update_cache(self, run_state):
"""
Main thread for updating the k8s cache
"""
start_time = time.time()
retry_delay_secs = None
while run_state.is_running() and not self.is_initialized():
# get cache state values that will be consistent for the duration of the loop iteration
local_state = self._state.copy_state()
# Delay the start of this cache if we have fuzzing turned on. This will reduce the stampede of
# agents all querying the API master at the same time on large clusters (when the agents are started
# at the same time.)
if local_state.cache_start_fuzz_secs > 0:
run_state.sleep_but_awaken_if_stopped(
random.uniform(0, local_state.cache_start_fuzz_secs)
)
# Delay before reattempting to initialize the cache if we had an error last time.
if retry_delay_secs is not None:
run_state.sleep_but_awaken_if_stopped(retry_delay_secs)
if not run_state.is_running() or self.is_initialized():
continue
# Records which component is being initialized. Used in error messages below.
component = "cluster name"
try:
self._update_cluster_name(local_state.k8s)
component = "api version"
self._update_api_server_version_if_necessary(local_state.k8s)
component = "runtime"
runtime = self._get_runtime(local_state.k8s)
self._lock.acquire()
try:
self._container_runtime = runtime
self._initialized = True
self._last_initialization_error = None
finally:
self._lock.release()
except K8sApiException as e:
global_log.warn(
"K8s API exception while updating %s in K8s cache (will retry) - %s"
% (component, six.text_type(e)),
limit_once_per_x_secs=300,
limit_key="k8s_api_init_cache",
)
self._update_initialization_error(
component, "K8s API error %s" % six.text_type(e)
)
# Delay a fixed amount. TODO: Maybe do exponential backoff here?
retry_delay_secs = 0.5
except AgentPodNotReadyException:
global_log.info(
"Agent container not ready while initializing cache (will retry)",
limit_once_per_x_secs=60,
limit_key="k8s_agent_pod_not_ready",
)
self._update_initialization_error(
component, "Agent container not ready"
)
retry_delay_secs = 1.0
except Exception as e:
global_log.warn(
"Exception occurred when updating %s in K8s cache (will retry) - %s\n%s"
% (component, six.text_type(e), traceback.format_exc()),
limit_once_per_x_secs=60,
limit_key="k8s_init_generic_error",
)
self._update_initialization_error(
component, "Unhandled error %s" % six.text_type(e)
)
# Unknown error. TODO: Maybe do exponential backoff here?
retry_delay_secs = 0.5
current_time = time.time()
elapsed = current_time - start_time
global_log.info("Kubernetes cache initialized in %.2f seconds" % elapsed)
local_state = self._state.copy_state()
# go back to sleep if we haven't taken longer than the expiry time
if elapsed < local_state.cache_expiry_secs:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"sleeping for %.2f seconds" % (local_state.cache_expiry_secs - elapsed),
)
run_state.sleep_but_awaken_if_stopped(
local_state.cache_expiry_secs - elapsed
)
# start the main update loop
last_purge = time.time()
while run_state.is_running():
# get cache state values that will be consistent for the duration of the loop iteration
local_state = self._state.copy_state()
try:
current_time = time.time()
global_log.log(
scalyr_logging.DEBUG_LEVEL_1, "Marking unused pods as expired"
)
self._pods_cache.mark_as_expired(current_time)
self._update_cluster_name(local_state.k8s)
self._update_api_server_version_if_necessary(
local_state.k8s, current_time=current_time
)
if last_purge + local_state.cache_purge_secs < current_time:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"Purging unused controllers last_purge=%s cache_purge_secs=%s current_time=%s"
% (last_purge, local_state.cache_purge_secs, current_time),
)
self._controllers.purge_unused(last_purge)
# purge any pods that haven't been queried within the cache_purge_secs
global_log.log(scalyr_logging.DEBUG_LEVEL_1, "Purging stale pods")
self._pods_cache.purge_unused(last_purge)
last_purge = current_time
except K8sApiException as e:
global_log.warn(
"Exception occurred when updating k8s cache - %s"
% (six.text_type(e)),
limit_once_per_x_secs=300,
limit_key="k8s_api_update_cache",
)
except Exception as e:
global_log.warn(
"Exception occurred when updating k8s cache - %s\n%s"
% (six.text_type(e), traceback.format_exc())
)
# Fuzz how much time we spend until the next cycle. This should spread out when the agents query the
# API master over time in clusters with a larger number of agents.
if local_state.cache_expiry_fuzz_secs > 0:
fuzz_factor = max(
random.uniform(0, local_state.cache_expiry_fuzz_secs), 0
)
else:
fuzz_factor = 0
run_state.sleep_but_awaken_if_stopped(
local_state.cache_expiry_secs - fuzz_factor
)
def pod(
self,
namespace,
name,
current_time=None,
allow_expired=True,
query_options=None,
ignore_k8s_api_exception=False,
):
"""Returns pod info for the pod specified by namespace and name or None if no pad matches.
Warning: Failure to pass current_time leads to incorrect recording of last access times, which will
lead to these objects being refreshed prematurely (potential source of bugs)
Querying the pod information is thread-safe, but the returned object should
not be written to.
@param allow_expired: If True, an object is considered present in cache even if it is expired.
@type allow_expired: bool
"""
local_state = self._state.copy_state()
if local_state.k8s is None:
return
return self._pods_cache.lookup(
local_state.k8s,
current_time,
namespace,
name,
kind="Pod",
allow_expired=allow_expired,
query_options=query_options,
ignore_k8s_api_exception=ignore_k8s_api_exception,
)
def is_pod_cached(self, namespace, name, allow_expired):
"""Returns true if the specified pod is in the cache and isn't expired.
Warning: Failure to pass current_time leads to incorrect recording of last access times, which will
lead to these objects being refreshed prematurely (potential source of bugs)
@param namespace: The pod's namespace
@param name: The pod's name
@param allow_expired: If True, an object is considered present in cache even if it is expired.
@type namespace: str
@type name: str
@type allow_expired: bool
@return: True if the pod is cached.
@rtype: bool
"""
return self._pods_cache.is_cached(namespace, name, allow_expired)
def controller(self, namespace, name, kind, current_time=None, query_options=None):
"""Returns controller info for the controller specified by namespace and name
or None if no controller matches.
Warning: Failure to pass current_time leads to incorrect recording of last access times, which will
lead to these objects being refreshed prematurely (potential source of bugs)
Querying the controller information is thread-safe, but the returned object should
not be written to.
"""
local_state = self._state.copy_state()
if local_state.k8s is None:
return
return self._controllers.lookup(
local_state.k8s,
current_time,
namespace,
name,
kind=kind,
query_options=query_options,
ignore_k8s_api_exception=True,
)
def pods_shallow_copy(self):
"""Retuns a shallow copy of the pod objects"""
return self._pods_cache.shallow_copy()
def get_cluster_name(self):
"""Returns the cluster name"""
result = None
self._lock.acquire()
try:
result = self._cluster_name
finally:
self._lock.release()
return result
def get_container_runtime(self):
"""Returns the k8s container runtime currently being used"""
result = None
self._lock.acquire()
try:
result = self._container_runtime
finally:
self._lock.release()
return result
def get_api_server_version(self):
"""Returns API server version"""
result = None
self._lock.acquire()
try:
result = self._api_server_version
finally:
self._lock.release()
return result
class KubernetesApi(object):
"""Simple wrapper class for querying the k8s api
"""
@staticmethod
def create_instance(
global_config,
k8s_api_url=None,
query_timeout=None,
verify_api_queries=None,
rate_limiter_key="K8S_CACHE_MAIN_RATELIMITER",
):
"""
@param global_config: Global configuration
@param k8s_api_url: overrides global config api url
@param query_timeout: overrides global config query timeout
@param verify_api_queries: overrides global config verify_api_queries
@param rate_limiter_key: Allow overriding of rate limiter key, otherwise, uses the "main" k8s cache ratelimiter
"""
if k8s_api_url is None:
k8s_api_url = global_config.k8s_api_url
if query_timeout is None:
query_timeout = global_config.k8s_cache_query_timeout_secs
if verify_api_queries is None:
verify_api_queries = global_config.k8s_verify_api_queries
kwargs = {
"k8s_api_url": k8s_api_url,
"query_timeout": query_timeout,
}
if not verify_api_queries:
kwargs["ca_file"] = None
elif global_config:
kwargs["ca_file"] = global_config.k8s_service_account_cert
if global_config:
kwargs.update(
{
"log_api_responses": global_config.k8s_log_api_responses,
"log_api_exclude_200s": global_config.k8s_log_api_exclude_200s,
"log_api_min_response_len": global_config.k8s_log_api_min_response_len,
"log_api_min_latency": global_config.k8s_log_api_min_latency,
"log_api_ratelimit_interval": global_config.k8s_log_api_ratelimit_interval,
"agent_log_path": global_config.agent_log_path,
"query_options_max_retries": global_config.k8s_controlled_warmer_max_query_retries,
"rate_limiter": BlockingRateLimiter.get_instance(
rate_limiter_key, global_config, logger=global_log
),
"token_file": global_config.k8s_service_account_token,
"namespace_file": global_config.k8s_service_account_namespace,
}
)
return KubernetesApi(**kwargs)
def __init__(
self,
ca_file="/run/secrets/kubernetes.io/serviceaccount/ca.crt",
k8s_api_url="https://kubernetes.default",
query_timeout=20,
log_api_responses=False,
log_api_exclude_200s=False,
log_api_min_response_len=False,
log_api_min_latency=0.0,
log_api_ratelimit_interval=300,
agent_log_path=None,
query_options_max_retries=3,
rate_limiter=None,
token_file="/var/run/secrets/kubernetes.io/serviceaccount/token",
namespace_file="/var/run/secrets/kubernetes.io/serviceaccount/namespace",
):
"""Init the kubernetes object"""
self.log_api_responses = log_api_responses
self.log_api_exclude_200s = log_api_exclude_200s
self.log_api_min_response_len = log_api_min_response_len
self.log_api_min_latency = log_api_min_latency
self.log_api_ratelimit_interval = log_api_ratelimit_interval
self.agent_log_path = agent_log_path
self._http_host = k8s_api_url
global_log.log(
scalyr_logging.DEBUG_LEVEL_1, "Kubernetes API host: %s", self._http_host
)
self.query_timeout = query_timeout
self._session = None
self._ca_file = ca_file
# We create a few headers ahead of time so that we don't have to recreate them each time we need them.
self._standard_headers = {
"Connection": "Keep-Alive",
"Accept": "application/json",
}
# The k8s API requires us to pass in an authentication token
# which we can obtain from a token file in a 'well known' location
self.token = ""
try:
# using with is ok here, because we need to be running
# a recent version of python for various 3rd party libs
f = open(token_file, "r")
try:
self.token = f.read()
finally:
f.close()
except IOError:
pass
# get the namespace this pod is running on
self.namespace = "default"
try:
# using with is ok here, because we need to be running
# a recent version of python for various 3rd party libs
f = open(namespace_file, "r")
try:
self.namespace = f.read()
finally:
f.close()
except IOError:
pass
self._standard_headers["Authorization"] = "Bearer %s" % (self.token)
# A rate limiter should normally be passed unless no rate limiting is desired.
self._query_options_max_retries = query_options_max_retries
self._rate_limiter = rate_limiter
@property
def default_query_options(self):
if not self._rate_limiter:
return None
return ApiQueryOptions(
max_retries=self._query_options_max_retries, rate_limiter=self._rate_limiter
)
def _verify_connection(self):
""" Return whether or not to use SSL verification
"""
if self._ca_file:
return self._ca_file
return False
def _ensure_session(self):
"""Create the session if it doesn't exist, otherwise do nothing
"""
if not self._session:
self._session = requests.Session()
self._session.headers.update(self._standard_headers)
def get_pod_name(self):
""" Gets the pod name of the pod running the scalyr-agent """
# 2->TODO in python2 os.environ returns 'str' type. Convert it to unicode.
return os_environ_unicode.get("SCALYR_K8S_POD_NAME") or os_environ_unicode.get(
"HOSTNAME"
)
def get_node_name(self, pod_name):
""" Gets the node name of the node running the agent """
# 2->TODO in python2 os.environ returns 'str' type. Convert it to unicode.
node = os_environ_unicode.get("SCALYR_K8S_NODE_NAME")
if not node:
pod = self.query_pod(self.namespace, pod_name)
spec = pod.get("spec", {})
node = spec.get("nodeName")
return node
def get_api_server_version(self):
"""Get the API server version (specifically the server gitVersion)
@return: The gitVersion extracted from /version JSON
@rtype: str
"""
version_map = self.query_api_with_retries(
"/version",
retry_error_context="get_api_server_version",
retry_error_limit_key="get_api_server_version",
)
return version_map.get("gitVersion")
def get_cluster_name(self):
""" Returns the name of the cluster running this agent.
There is no way to get this from the k8s API so we check the following:
If the environment variable SCALYR_K8S_CLUSTER_NAME is set, then use that.
Otherwise query the api for the pod running the agent container and check to see
if it has an annotation: agent.config.scalyr.com/cluster_name, and if so, use that.
Otherwise return None
"""
# 2->TODO in python2 os.environ returns 'str' type. Convert it to unicode.
cluster = os_environ_unicode.get("SCALYR_K8S_CLUSTER_NAME", "")
if cluster:
return cluster
pod_name = self.get_pod_name()
pod = self.query_pod(self.namespace, pod_name)
if pod is None:
return None
metadata = pod.get("metadata", {})
annotations = metadata.get("annotations", {})
if "agent.config.scalyr.com/cluster_name" in annotations:
return annotations["agent.config.scalyr.com/cluster_name"]
return None
def query_api_with_retries(
self,
query,
query_options="not-set",
retry_error_context=None,
retry_error_limit_key=None,
):
"""Invoke query api through rate limiter with retries
@param query: Query string
@param query_options: ApiQueryOptions containing retries and rate_limiter.
Explicit None signifies no rate limiting.
Default 'not-set' signifies "use k8s-instance specific rate limiter and query options
@param retry_error_context: context object whose string representation is logged upon failure (if None)
@param retry_error_limit_key: key for limiting retry logging
@type query: str
@type query_options: ApiQueryOptions
@type retry_error_context: object
@type retry_error_limit_key: str
@return: json-decoded response of the query api call
@rtype: dict or a scalyr_agent.json_lib.objects.JsonObject
"""
if not query_options:
return self.query_api(query)
if query_options == "not-set":
query_options = self.default_query_options
retries_left = query_options.max_retries
rate_limiter = query_options.rate_limiter
while True:
t = time.time()
token = rate_limiter.acquire_token()
rate_limit_outcome = False
try:
result = self.query_api(
query,
return_temp_errors=query_options.return_temp_errors,
rate_limited=True,
)
rate_limit_outcome = True
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Rate limited k8s api query took %s seconds" % (time.time() - t),
)
return result
except K8sApiNotFoundException:
# catch and re-raise this before any other temporary errors, because we need to
# handle this one separately. Rather than immediately retrying, we won't do anything,
# rather, if the agent wants to query this endpoint again later then it will.
# This is useful for when a pod hasn't fully started up yet and querying its endpoint
# will return a 404. Then if you query again a few seconds later everything works.
rate_limit_outcome = True
raise
except K8sApiTemporaryError as e:
rate_limit_outcome = False
if retries_left <= 0:
raise e
retries_left -= 1
if retry_error_context:
global_log.warn(
"k8s API - retrying temporary error: %s" % retry_error_context,
limit_once_per_x_secs=300,
limit_key="k8s_api_retry-%s" % retry_error_limit_key,
)
finally:
# Any uncaught exceptions will result in an outcome of False
rate_limiter.release_token(token, rate_limit_outcome)
def __open_api_response_log(self, path, rate_limited):
"""Opens a file for logging the api response
The file will be located in agent_log_dir/kapi/(limited/not_limited) depending on whether the
api call is rate limited or not.
@param path: The URL path to be queried (also embedded in the filename)
@param rate_limited: Whether the response is rate limited or not
@type path: str
@type rate_limited: bool
@returns File handle to the api response log file or None upon failure.
@rtype: file handle
"""
# try to open the logged_response_file
try:
kapi = os.path.join(self.agent_log_path, "kapi")
if not os.path.exists(kapi):
os.mkdir(kapi, 0o755)
if rate_limited:
kapi = os.path.join(kapi, "limited")
else:
kapi = os.path.join(kapi, "limited")
if not os.path.exists(kapi):
os.mkdir(kapi, 0o755)
fname = "%s_%.20f_%s_%s" % (
strftime("%Y%m%d-%H-%M-%S", gmtime()),
time.time(),
random.randint(1, 100),
path.replace("/", "--"),
)
# if logging responses to disk, always prepend the stack trace for easier debugging
return open(os.path.join(kapi, fname), "w")
except IOError:
pass
def __check_for_fake_response(self, logged_response_file):
"""Helper method that checks for a well known file on disk and simulates timeouts from API master
If successfully logging responses, we can also check for a local "simfile" to simulate API master timeout.
The simfile is a textfile that contains the HTTP error code we want to simulate.
This method should only be called during development.
@param logged_response_file: Logged-response logfile
@type logged_response_file: file handle
@raises K8sApiTemporaryError: if the simfile contains one of the following http error codes that we consider
as a temporary error.
"""
fake_response_file = os.path.join(self.agent_log_path, "simfile")
if os.path.isfile(fake_response_file):
fake_response_code = None
try:
fake_f = open(fake_response_file, "r")
try:
fake_response_code = fake_f.read().strip()
finally:
fake_f.close()
except Exception:
if logged_response_file:
logged_response_file.write(
"Error encountered while attempting to fake a response code:\n%s\n\n"
% traceback.format_exc()
)
if fake_response_code in ["404", "503", "429"]:
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Faking api master temporary error (%s) for url: %s",
limit_once_per_x_secs=300,
limit_key="k8s_api_query_fake_temporary_error",
)
raise K8sApiTemporaryError("Fake %s" % fake_response_code)
def query_api(self, path, pretty=0, return_temp_errors=False, rate_limited=False):
""" Queries the k8s API at 'path', and converts OK responses to JSON objects
"""
self._ensure_session()
pretty = "pretty=%d" % pretty
if "?" in path:
pretty = "&%s" % pretty
else:
pretty = "?%s" % pretty
url = self._http_host + path + pretty
response = None
# Various state used logging of responses
log_responses = self.log_api_responses
logged_response = []
response_status_code = -1
response_len = 0
try:
# Optionally prepend stack trace into logged response
if log_responses:
limited_txt = ""
if rate_limited:
limited_txt = " (rate limited)"
logged_response.append("k8s.query_api%s: %s" % (limited_txt, path))
stack_trace = "".join(traceback.format_stack())
logged_response.append("\\n%s" % stack_trace.replace("\n", "\\n"))
# Make actual API call
latency = float("inf")
examine_latency = log_responses and self.log_api_min_latency > 0
if examine_latency:
t1 = time.time()
try:
response = self._session.get(
url, verify=self._verify_connection(), timeout=self.query_timeout
)
response.encoding = "utf-8"
except Exception as e:
if return_temp_errors:
raise K8sApiTemporaryError(
"Temporary error seen while accessing api: %s"
% six.text_type(e)
)
else:
raise
finally:
# conditionally record latency regardless of exception
if examine_latency:
latency = time.time() - t1
# No exception case: record response status code and length
response_status_code = response.status_code
if response.text:
response_len = len(response.text)
if response.status_code != 200:
if response.status_code == 401 or response.status_code == 403:
raise K8sApiAuthorizationException(
path, status_code=response.status_code
)
elif response.status_code == 404:
raise K8sApiNotFoundException(
path, status_code=response.status_code
)
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Invalid response from K8S API.\n\turl: %s\n\tstatus: %d\n\tresponse length: %d"
% (url, response.status_code, len(response.text)),
limit_once_per_x_secs=300,
limit_key="k8s_api_query",
)
if return_temp_errors:
raise K8sApiTemporaryError(
"Invalid response from Kubernetes API when querying '%s': %s"
% (path, six.text_type(response)),
status_code=response.status_code,
)
else:
raise K8sApiException(
"Invalid response from Kubernetes API when querying '%s': %s"
% (path, six.text_type(response)),
status_code=response.status_code,
)
# Optionally prepend stack trace into logged response
# newlines should become literal '\n' so that the entire response is a single line
if log_responses and response.text:
logged_response.append(response.text.replace("\n", "\\n"))
return util.json_decode(response.text)
finally:
# Only debug-log the response if all criteria (status code, latency, response len) are satisfied
if log_responses:
# Always log non-200 responses (using integer division to inspect first digit.
# For 200 responses, only log if log_api_exclude_200s is False.
if (response_status_code // 100) != 2 or not self.log_api_exclude_200s:
if (
response_len >= self.log_api_min_response_len
and latency >= self.log_api_min_latency
):
# Log the url, stacktrace and response text as a single line of text
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"\\n\\n".join(logged_response),
limit_once_per_x_secs=self.log_api_ratelimit_interval,
limit_key="query-api-log-resp-%s"
% util.md5_hexdigest(path),
)
def query_object(self, kind, namespace, name, query_options=None):
""" Queries a single object from the k8s api based on an object kind, a namespace and a name
An empty dict is returned if the object kind is unknown, or if there is an error generating
an appropriate query string
@param kind: the kind of the object
@param namespace: the namespace to query in
@param name: the name of the object
@return - a dict returned by the query
"""
if kind not in _OBJECT_ENDPOINTS:
global_log.warn(
"k8s API - tried to query invalid object type: %s, %s, %s. Creating dummy object"
% (kind, namespace, name),
limit_once_per_x_secs=300,
limit_key="k8s_api_query-%s" % kind,
)
if kind is None:
kind = "<invalid>"
# return a dummy object with valid kind, namespace and name members
return {"kind": kind, "metadata": {"namespace": namespace, "name": name}}
query = None
try:
query = _OBJECT_ENDPOINTS[kind]["single"].substitute(
name=name, namespace=namespace
)
except Exception as e:
global_log.warn(
"k8s API - failed to build query string - %s" % (six.text_type(e)),
limit_once_per_x_secs=300,
limit_key="k8s_api_build_query-%s" % kind,
)
return {}
return self.query_api_with_retries(
query,
query_options=query_options,
retry_error_context="%s, %s, %s" % (kind, namespace, name),
retry_error_limit_key="query_object-%s" % kind,
)
def query_objects(self, kind, namespace=None, filter=None):
""" Queries a list of objects from the k8s api based on an object kind, optionally limited by
a namespace and a filter
A dict containing an empty 'items' array is returned if the object kind is unknown, or if there is an error generating
an appropriate query string
"""
if kind not in _OBJECT_ENDPOINTS:
global_log.warn(
"k8s API - tried to list invalid object type: %s, %s"
% (kind, namespace),
limit_once_per_x_secs=300,
limit_key="k8s_api_list_query-%s" % kind,
)
return {"items": []}
query = _OBJECT_ENDPOINTS[kind]["list-all"]
if namespace:
try:
query = _OBJECT_ENDPOINTS[kind]["list"].substitute(namespace=namespace)
except Exception as e:
global_log.warn(
"k8s API - failed to build namespaced query list string - %s"
% (six.text_type(e)),
limit_once_per_x_secs=300,
limit_key="k8s_api_build_list_query-%s" % kind,
)
if filter:
query = "%s?fieldSelector=%s" % (
query,
six.moves.urllib.parse.quote(filter),
)
return self.query_api_with_retries(
query,
retry_error_context="%s, %s" % (kind, namespace),
retry_error_limit_key="query_objects-%s" % kind,
)
def query_pod(self, namespace, name):
"""Convenience method for query a single pod"""
return self.query_object("Pod", namespace, name)
def query_pods(self, namespace=None, filter=None):
"""Convenience method for query a single pod"""
return self.query_objects("Pod", namespace, filter)
def query_namespaces(self):
"""Wrapper to query all namespaces"""
return self.query_api_with_retries(
"/api/v1/namespaces",
retry_error_context="query_pods",
retry_error_limit_key="query_pods",
)
def stream_events(self, path="/api/v1/watch/events", last_event=None):
"""Streams k8s events from location specified at path"""
self._ensure_session()
url = self._http_host + path
if last_event:
resource = "resourceVersion=%s" % six.text_type(last_event)
if "?" in url:
resource = "&%s" % resource
else:
resource = "?%s" % resource
url += resource
response = self._session.get(
url,
verify=self._verify_connection(),
timeout=self.query_timeout,
stream=True,
)
if response.status_code != 200:
global_log.log(
scalyr_logging.DEBUG_LEVEL_0,
"Invalid response from K8S API.\n\turl: %s\n\tstatus: %d\n\tresponse length: %d"
% (url, response.status_code, len(response.text)),
limit_once_per_x_secs=300,
limit_key="k8s_stream_events",
)
raise K8sApiException(
"Invalid response from Kubernetes API when querying %d - '%s': %s"
% (response.status_code, path, six.text_type(response)),
status_code=response.status_code,
)
for line in response.iter_lines():
if line:
yield line
class KubeletApi(object):
"""
A class for querying the kubelet API
"""
def __init__(
self,
k8s,
host_ip=None,
kubelet_url_template=Template("https://${host_ip}:10250"),
):
"""
@param k8s - a KubernetesApi object
"""
self._host_ip = host_ip
if self._host_ip is None:
try:
pod_name = k8s.get_pod_name()
pod = k8s.query_pod(k8s.namespace, pod_name)
status = pod.get("status", {})
self._host_ip = status.get("hostIP", None)
# Don't raise exception for now
# if host_ip is None:
# raise KubeletApiException( "Unable to get host IP for pod: %s/%s" % (k8s.namespace, pod_name) )
except Exception:
global_log.exception("couldn't get host ip")
pass
self._session = requests.Session()
headers = {
"Accept": "application/json",
"Authorization": "Bearer %s" % k8s.token,
}
self._session.headers.update(headers)
global_log.info("KubeletApi host ip = %s" % self._host_ip)
self._kubelet_url = self._build_kubelet_url(kubelet_url_template, host_ip)
self._fallback_kubelet_url = self._build_kubelet_url(
FALLBACK_KUBELET_URL_TEMPLATE, host_ip
)
self._timeout = 20.0
@staticmethod
def _build_kubelet_url(kubelet_url, host_ip):
if host_ip:
return kubelet_url.substitute(host_ip=host_ip)
return None
def _switch_to_fallback(self):
self._kubelet_url = self._fallback_kubelet_url
def query_api(self, path):
""" Queries the kubelet API at 'path', and converts OK responses to JSON objects
"""
while True:
url = self._kubelet_url + path
response = self._session.get(url, timeout=self._timeout, verify=False)
response.encoding = "utf-8"
if response.status_code != 200:
if (
response.status_code == 403
and self._kubelet_url != self._fallback_kubelet_url
):
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Invalid response while querying the Kubelet API: %d. Falling back to older endpoint."
% response.status_code,
)
self._switch_to_fallback()
continue
else:
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Invalid response from Kubelet API.\n\turl: %s\n\tstatus: %d\n\tresponse length: %d"
% (url, response.status_code, len(response.text)),
limit_once_per_x_secs=300,
limit_key="kubelet_api_query",
)
raise KubeletApiException(
"Invalid response from Kubelet API when querying '%s': %s"
% (path, six.text_type(response))
)
return util.json_decode(response.text)
def query_pods(self):
return self.query_api("/pods")
def query_stats(self):
return self.query_api("/stats/summary")
class DockerMetricFetcher(object):
"""Allows for parallel fetching of container metrics from Docker. Typically, one instance of this object
will be created per monitor (Docker or Kubernetes). This current implementation relies on threads to
issue multiple `stats` requests in parallel.
This approach is necessary because the `stats` Docker command blocks for 2 seconds while it gathers
cpu measures over the interval. If we had 40 containers whose metrics we were trying to retrieve, we would
have to wait for a total of 80 seconds if we issued the `stats` request one at a time.
To get the benefit of this approach, you must first invoke `prefetch_metrics` for each container whose metrics
you wish to retrieve, and then invoke `get_metrics` to actually get the metrics.
"""
def __init__(self, docker_client, concurrency):
"""
@param docker_client: The docker client object to use for issuing `stats` requests.
@param concurrency: The maximum number of `stats` requests to issue in parallel. This controls the maximum
number of threads that will be created.
@type docker_client: k8s_test.MetricFaker
@type concurrency: int
"""
self.__docker_client = docker_client
self.__concurrency = concurrency
# A sentinel value used in the `__container_scoreboard` to indicate the container is in the queue to be fetched.
self.__PENDING = dict()
# A sentinel value used in the `__container_scoreboard` to indicate the `stats` call for a container has been
# issued but no response has been received.
self.__IN_FLIGHT = dict()
# The lock that must be held for all other state variables in this class.
self.__lock = threading.Lock()
# Records the state of requesting metrics for all containers. Maps the container name to its state or
# metric value. If the value is __PENDING, then the `stats` request for the request has not been issued.
# If it is __IN_FLIGHT, it has been requested. If it is None, an error occurred. Otherwise, the value
# is the result of the `stats` request.
self.__container_scoreboard = dict()
# Whether or not `stop` has been invoked.
self.__is_stopped = False
# The conditional variable that can be waited on to be notified of any changes to the state of this object,
# such as whether it has been stopped or if a stats results has been added in to `__container_scoreboard`.
self.__cv = threading.Condition(self.__lock)
# The number of worker threads (to perform `stats` calls) that have been created. This will always be
# less than `concurrency`.
self.__num_worker_threads = 0
# A list of containers whose metrics should be fetched. This is the same as all entries in
# `__container_scoreboard` whose value is `__PENDING`.
self.__pending_fetches = []
# The total number of containers in `__container_scoreboard` with value either `__PENDING` or `__IN_FLIGHT`.
self.__remaining_metrics_count = 0
# The number of worker threads blocked, waiting for a container to fetch its metrics.
self.__idle_workers_count = 0
def prefetch_metrics(self, container_id):
"""Initiates requesting invoking `stats` for the specified container. If you invoke this, you must
also eventually invoke `get_metrics` with the same container. By invoking this first, the `get_metrics`
call will take less time when issuing many `stats` requests.
Whenever possible, you should first invoke this method for all containers whose metrics you wish to request
before any call to `get_metrics`.
The behavior is not well defined if you invoke `prefetch_metrics` multiple times for a container before
invoking `get_metrics` for it.
@param container_id: The id of the container to fetch.
@type container_id: str
"""
self.__lock.acquire()
try:
if container_id not in self.__container_scoreboard:
self._add_fetch_task(container_id)
finally:
self.__lock.release()
def get_metrics(self, container_id):
"""Blocks until the `stats` call for the specified container is received. If `prefetch_metrics` was not
invoked already for this container, then the `stats` request will be issued.
@param container_id: The container whose metrics should be fetched.
@type container_id: str
@return The metrics for the container, or None if there was an error or if `stop` was invoked on this object.
@rtype JSON
"""
self.__lock.acquire()
try:
while True:
if self.__is_stopped:
return None
# Fetch the result if it was prefetched.
if container_id not in self.__container_scoreboard:
self._add_fetch_task(container_id)
status = self.__container_scoreboard[container_id]
if status is not self.__PENDING and status is not self.__IN_FLIGHT:
result = self.__container_scoreboard[container_id]
del self.__container_scoreboard[container_id]
return result
# Otherwise no result has been received yet.. wait..
self.__cv.wait()
finally:
self.__lock.release()
def stop(self):
"""Stops the fetcher. Any calls blocking on `get_metrics` will finish and return `None`. All threads
started by this instance will be stopped (though, this method does not wait on them to terminate).
"""
self.__lock.acquire()
try:
self.__is_stopped = True
# Notify all threads that may be waiting on a new container or waiting on a metric result that we have
# been stopped.
self.__cv.notifyAll()
finally:
self.__lock.release()
def idle_workers(self):
"""
Used for testing.
@return: The number of worker threads currently blocking, waiting for a container whose metrics need fetching.
@rtype: int
"""
self.__lock.acquire()
try:
return self.__idle_workers_count
finally:
self.__lock.release()
def _get_fetch_task(self):
"""Blocks until either there is a new container whose metrics need to be fetched or until this instance
is stopped.
@return: A tuple containing the container whose metrics should be fetched and a boolean indicating if the
instance has been stopped. If it has been stopped, the container will be None.
@rtype: (str, bool)
"""
self.__lock.acquire()
try:
while True:
if self.__is_stopped:
return None, True
if len(self.__pending_fetches) > 0:
container = self.__pending_fetches.pop(0)
self.__container_scoreboard[container] = self.__PENDING
self.__idle_workers_count -= 1
return container, False
self.__cv.wait()
finally:
self.__lock.release()
def __start_workers(self, count):
"""Start `count` worker threads that will fetch metrics results.
@param count: The number of threads to start.
@type count: int
"""
new_number_workers = min(self.__concurrency, count + self.__num_worker_threads)
for i in range(self.__num_worker_threads, new_number_workers):
x = threading.Thread(target=self.__worker)
# Set daemon so this thread does not need to be finished for the overall process to stop. This allows
# the process to terminate even if a `stats` request is still in-flight.
x.setDaemon(True)
x.start()
self.__num_worker_threads += 1
# For accounting purposes,we consider the thread idle until it actually has a container it is fetching.
self.__idle_workers_count += 1
def __worker(self):
"""The body for the worker threads.
"""
while True:
# Get the next container to fetch if there is one.
container_id, is_stopped = self._get_fetch_task()
if is_stopped:
return
result = None
try:
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Attempting to retrieve metrics for cid=%s" % container_id,
)
result = self.__docker_client.stats(
container=container_id, stream=False
)
except Exception as e:
global_log.error(
"Error readings stats for '%s': %s\n%s"
% (container_id, six.text_type(e), traceback.format_exc()),
limit_once_per_x_secs=300,
limit_key="api-stats-%s" % container_id,
)
self._record_fetch_result(container_id, result)
def _add_fetch_task(self, container_id):
"""Adds the specified container to the list of containers whose metrics will be fetched. Eventually, a worker
thread will grab this container and fetch its metrics.
IMPORTANT: callers must hold `__lock` when invoking this method.
@param container_id: The container whose metrics should be fetched.
@type container_id: str
"""
self.__remaining_metrics_count += 1
self.__container_scoreboard[container_id] = self.__PENDING
self.__pending_fetches.append(container_id)
# Notify any worker threads waiting for a container.
self.__cv.notifyAll()
# We need to spin up new worker threads if the amount of remaining metrics (PENDING or IN-FLIGHT) is greater
# than the number of threads we already have.
if (
self.__remaining_metrics_count > self.__num_worker_threads
and self.__num_worker_threads < self.__concurrency
):
self.__start_workers(
self.__remaining_metrics_count - self.__num_worker_threads
)
def _record_fetch_result(self, container_id, result):
"""Record that the `stats` result for the specified container. If there was an error, result should be
None.
@type container_id: str
@type result: JsonObject
"""
self.__lock.acquire()
try:
self.__container_scoreboard[container_id] = result
self.__remaining_metrics_count -= 1
# Since this is only invoked by a worker once their stats call is done, we know they are now idle.
self.__idle_workers_count += 1
# Wake up any thread that was waiting on this result.
self.__cv.notifyAll()
finally:
self.__lock.release()
def _create_k8s_cache():
"""
creates a new k8s cache object
"""
return KubernetesCache(start_caching=False)
# global cache object - the module loading system guarantees this is only ever
# initialized once, regardless of how many modules import k8s.py
_k8s_cache = _create_k8s_cache()
AGENT-377 Suppress warning when accessing kubelet (#524)
* AGENT-377 Suppress warning when accessing kubelet
* Comment on thread safety
* Warning for unverified https requests to the kubelet
from __future__ import unicode_literals
from __future__ import absolute_import
import hashlib
import os
import random
import re
import warnings
from string import Template
import sys
import threading
import time
from time import strftime, gmtime
import traceback
from io import open
import six
import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error
from six.moves import range
import scalyr_agent.monitor_utils.annotation_config as annotation_config
from scalyr_agent.monitor_utils.annotation_config import BadAnnotationConfig
from scalyr_agent.monitor_utils.blocking_rate_limiter import BlockingRateLimiter
import scalyr_agent.third_party.requests as requests
from scalyr_agent.util import StoppableThread
from scalyr_agent.json_lib import JsonObject
import scalyr_agent.scalyr_logging as scalyr_logging
import scalyr_agent.util as util
from scalyr_agent.compat import os_environ_unicode
global_log = scalyr_logging.getLogger(__name__)
# A regex for splitting a container id and runtime
_CID_RE = re.compile("^(.+)://(.+)$")
# endpoints used by the agent for querying the k8s api. Having this mapping allows
# us to avoid special casing the logic for each different object type. We can just
# look up the appropriate endpoint in this dict and query objects however we need.
#
# The dict is keyed by object kind, and or each object kind, there are 3 endpoints:
# single, list and list all.
#
# `single` is for querying a single object of a specific type
# `list` is for querying all objects of a given type in a specific namespace
# `list-all` is for querying all objects of a given type in the entire cluster
#
# the `single` and `list` endpoints are Templates that require the caller to substitute
# in the appropriate values for ${namespace} and ${name}
_OBJECT_ENDPOINTS = {
"CronJob": {
"single": Template(
"/apis/batch/v1beta1/namespaces/${namespace}/cronjobs/${name}"
),
"list": Template("/apis/batch/v1beta1/namespaces/${namespace}/cronjobs"),
"list-all": "/apis/batch/v1beta1/cronjobs",
},
"DaemonSet": {
"single": Template("/apis/apps/v1/namespaces/${namespace}/daemonsets/${name}"),
"list": Template("/apis/apps/v1/namespaces/${namespace}/daemonsets"),
"list-all": "/apis/apps/v1/daemonsets",
},
"Deployment": {
"single": Template("/apis/apps/v1/namespaces/${namespace}/deployments/${name}"),
"list": Template("/apis/apps/v1/namespaces/${namespace}/deployments"),
"list-all": "/apis/apps/v1/deployments",
},
"Job": {
"single": Template("/apis/batch/v1/namespaces/${namespace}/jobs/${name}"),
"list": Template("/apis/batch/v1/namespaces/${namespace}/jobs"),
"list-all": "/apis/batch/v1/jobs",
},
"Pod": {
"single": Template("/api/v1/namespaces/${namespace}/pods/${name}"),
"list": Template("/api/v1/namespaces/${namespace}/pods"),
"list-all": "/api/v1/pods",
},
"ReplicaSet": {
"single": Template("/apis/apps/v1/namespaces/${namespace}/replicasets/${name}"),
"list": Template("/apis/apps/v1/namespaces/${namespace}/replicasets"),
"list-all": "/apis/apps/v1/replicasets",
},
"ReplicationController": {
"single": Template(
"/api/v1/namespaces/${namespace}/replicationcontrollers/${name}"
),
"list": Template("/api/v1/namespaces/${namespace}/replicationcontrollers"),
"list-all": "/api/v1/replicationcontrollers",
},
"StatefulSet": {
"single": Template(
"/apis/apps/v1/namespaces/${namespace}/statefulsets/${name}"
),
"list": Template("/apis/apps/v1/namespaces/${namespace}/statefulsets"),
"list-all": "/apis/apps/v1/statefulsets",
},
}
# Template for an older kubelet endpoint that we may want to fall back to if the new one is unavailable due to an
# older kubernetes version
FALLBACK_KUBELET_URL_TEMPLATE = Template("http://${host_ip}:10255")
def cache(global_config):
"""
Returns the global k8s cache, configured using the options in `config`
@param config: The configuration
@type config: A Scalyr Configuration object
"""
# split comma delimited string of namespaces to ignore in to a list of strings
namespaces_to_ignore = []
for x in global_config.k8s_ignore_namespaces:
namespaces_to_ignore.append(x.strip())
cache_config = _CacheConfig(
api_url=global_config.k8s_api_url,
verify_api_queries=global_config.k8s_verify_api_queries,
cache_expiry_secs=global_config.k8s_cache_expiry_secs,
cache_expiry_fuzz_secs=global_config.k8s_cache_expiry_fuzz_secs,
cache_start_fuzz_secs=global_config.k8s_cache_start_fuzz_secs,
cache_purge_secs=global_config.k8s_cache_purge_secs,
query_timeout=global_config.k8s_cache_query_timeout_secs,
global_config=global_config,
)
# update the config and return current cache
_k8s_cache.update_config(cache_config)
return _k8s_cache
def terminate_agent_process(reason):
"""Terminate this agent process, causing the pod running the agent to restart the agent container.
:param reason: The termination reason which will be written in the K8s termination log and crash report.
:type reason: six.text_type
"""
try:
with open("/dev/termination-log", "w") as fp:
fp.write(reason)
finally:
sys.exit(1)
class K8sApiException(Exception):
"""A wrapper around Exception that makes it easier to catch k8s specific
exceptions
"""
def __init__(self, message, status_code=0):
super(K8sApiException, self).__init__(message)
self.status_code = status_code
class K8sApiTemporaryError(K8sApiException):
"""The base class for all temporary errors where a retry may result in success (timeouts, too many requests,
etc) returned when issuing requests to the K8s API server
"""
def __init__(self, message, status_code=0):
super(K8sApiTemporaryError, self).__init__(message, status_code=status_code)
class K8sApiPermanentError(K8sApiException):
"""The base class for all permanent errors where a retry will always fail until human action is taken
(authorization errors, object not found) returned when issuing requests to the K8s API server
"""
def __init__(self, message, status_code=0):
super(K8sApiPermanentError, self).__init__(message, status_code=status_code)
class K8sApiAuthorizationException(K8sApiPermanentError):
"""A wrapper around Exception that makes it easier to catch k8s authorization
exceptions
"""
def __init__(self, path, status_code=0):
super(K8sApiAuthorizationException, self).__init__(
"You don't have permission to access %s. Please ensure you have correctly configured the RBAC permissions for the scalyr-agent's service account"
% path,
status_code=status_code,
)
# K8sApiNotFoundException needs to be a TemporaryError because there are cases
# when a pod is starting up that querying the pods endpoint will return 404 Not Found
# but then the same query a few seconds later (once the pod is up and running) will return
# 200 - Ok. Having it derive from PermanentError would put it on a blacklist, when all we
# might want is to back off for a few seconds and try again
class K8sApiNotFoundException(K8sApiTemporaryError):
"""
A wrapper around Exception that makes it easier to catch not found errors when querying the k8s api
"""
def __init__(self, path, status_code=0):
super(K8sApiNotFoundException, self).__init__(
"The resource at location `%s` was not found" % path,
status_code=status_code,
)
class KubeletApiException(Exception):
"""A wrapper around Exception that makes it easier to catch k8s specific
exceptions
"""
pass
class QualifiedName(object):
"""
Represents a fully qualified name for a Kubernetes object using both its name and namespace.
"""
__slots__ = ("namespace", "name")
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __eq__(self, other):
return self.namespace == other.namespace and self.name == other.name
def __ne__(self, other):
return not self.__eq__(other)
def is_valid(self):
return self.namespace is not None and self.name is not None
class PodInfo(object):
"""
A collection class that stores label and other information about a kubernetes pod
"""
def __init__(
self,
name="",
namespace="",
uid="",
node_name="",
labels={},
container_names=[],
annotations={},
controller=None,
):
self.name = name
self.namespace = namespace
self.uid = uid
self.node_name = node_name
self.labels = labels
self.container_names = container_names
self.annotations = annotations
self.controller = controller # controller can't change for the life of the object so we don't include it in hash
# generate a hash we can use to compare whether or not
# any of the pod info has changed
md5 = hashlib.md5()
md5.update(name)
md5.update(namespace)
md5.update(uid)
md5.update(node_name)
# flatten the labels dict in to a single string because update
# expects a string arg. To avoid cases where the 'str' of labels is
# just the object id, we explicitly create a flattened string of
# key/value pairs
flattened = []
for k, v in six.iteritems(labels):
flattened.append(k)
flattened.append(v)
md5.update("".join(flattened))
# flatten the container names
# see previous comment for why flattening is necessary
md5.update("".join(container_names))
# flatten the annotations dict in to a single string
# see previous comment for why flattening is necessary
flattened = []
for k, v in six.iteritems(annotations):
flattened.append(k)
flattened.append(six.text_type(v))
md5.update("".join(flattened))
self.digest = md5.digest()
def exclude_pod(self, container_name=None, default=False):
"""
Returns whether or not this pod should be excluded based
on include/exclude annotations. If an annotation 'exclude' exists
then this will be returned. If an annotation 'include' exists, then
the boolean opposite of 'include' will be returned. 'include' will
always override 'exclude' if it exists.
param: container_name - if specified, and container_name exists in
the pod annotations, then the container specific annotations will
also be checked. These will supercede the pod level include/exclude
annotations
param: default - Boolean the default value if no annotations are found
return Boolean - whether or not to exclude this pod
"""
def exclude_status(annotations, default):
exclude = util.value_to_bool(annotations.get("exclude", default))
# include will always override value of exclude if both exist
exclude = not util.value_to_bool(annotations.get("include", not exclude))
return exclude
result = exclude_status(self.annotations, default)
if container_name and container_name in self.annotations:
result = exclude_status(self.annotations[container_name], result)
return result
class Controller(object):
"""
General class for all cached Controller objects
"""
def __init__(
self,
name="",
namespace="",
kind="",
parent_name=None,
parent_kind=None,
labels={},
):
self.name = name
self.namespace = namespace
self.kind = kind
self.access_time = None
self.parent_name = parent_name
self.parent_kind = parent_kind
flat_labels = []
for key, value in six.iteritems(labels):
flat_labels.append("%s=%s" % (key, value))
self.flat_labels = ",".join(flat_labels)
class ApiQueryOptions(object):
"""Options to use when querying the K8s Api server.
"""
def __init__(self, max_retries=3, return_temp_errors=True, rate_limiter=None):
"""
@param max_retries: The number of times we will retry a query if it receives a temporary error before failing.
@param return_temp_errors: If true, all non-known errors will automatically be categorized as temporary errors.
@param rate_limiter: Rate limiter for api calls
"""
self.max_retries = max_retries
self.return_temp_errors = return_temp_errors
self.rate_limiter = rate_limiter
def __repr__(self):
return (
"ApiQueryOptions\n\tmax_retries=%s\n\treturn_temp_errors=%s\n\trate_limiter=%s\n"
% (self.max_retries, self.return_temp_errors, self.rate_limiter)
)
class _K8sCache(object):
"""
A cached store of objects from a k8s api query
This is a private class to this module. See KubernetesCache which instantiates
instances of _K8sCache for querying different k8s API objects.
This abstraction is thread-safe-ish, assuming objects returned
from querying the cache are never written to.
"""
def __init__(self, processor, object_type):
"""
Initialises a Kubernetes Cache
@param processor: a _K8sProcessor object for querying/processing the k8s api
@param object_type: a string containing a textual name of the objects being cached, for use in log messages
"""
# protects self._objects and self._objects_expired
self._lock = threading.Lock()
# dict of object dicts. The outer dict is hashed by namespace,
# and the inner dict is hashed by object name
self._objects = {}
# Identical to self._objects but contains optional expired booleans for corresponding object
# New object won't have an entry. Only older objects that have been "soft purged" will be marked
# with a boolean (True).
# Note:
# Expirations should ideally be stored in the _objects dict itself alongside objects. However,
# the long-term direction for this feature is uncertain and so this is a temporary implementation
# needed to support the notion of a "soft purge".
self._objects_expired = {}
self._processor = processor
self._object_type = object_type
def shallow_copy(self):
"""Returns a shallow copy of all the cached objects dict"""
result = {}
self._lock.acquire()
try:
for k, v in six.iteritems(self._objects):
result[k] = v
finally:
self._lock.release()
return result
def __get_stale_objects(self, access_time):
"""Get all stale objects. Caller should first obtain lock on self._objects"""
stale = []
for namespace, objs in six.iteritems(self._objects):
for obj_name, obj in six.iteritems(objs):
if hasattr(obj, "access_time"):
if obj.access_time is None or obj.access_time < access_time:
stale.append((namespace, obj_name))
return stale
def mark_as_expired(self, access_time):
"""Mark all stale cache objects as expired
@param access_time: Any objects last accessed before access_time will be purged
"""
self._lock.acquire()
try:
stale = self.__get_stale_objects(access_time)
for (namespace, obj_name) in stale:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"Mark object %s/%s as expired in cache" % (namespace, obj_name),
)
expired_set = self._objects_expired.setdefault(namespace, {})
expired_set.setdefault(obj_name, True)
finally:
self._lock.release()
def purge_unused(self, access_time):
"""Removes any items from the store who haven't been accessed since `access_time`
@param access_time: Any objects last accessed before access_time will be purged
"""
self._lock.acquire()
try:
stale = self.__get_stale_objects(access_time)
for (namespace, obj_name) in stale:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"Removing object %s/%s from cache" % (namespace, obj_name),
)
self._objects[namespace].pop(obj_name, None)
self._objects_expired.get(namespace, {}).pop(obj_name, None)
finally:
self._lock.release()
def _update_object(
self, k8s, kind, namespace, name, current_time, query_options=None
):
"""update a single object, returns the object if found, otherwise return None
This method will always raise a K8SApiException upon k8s api response failure. It is the responsibility of the
caller to handle exceptions.
"""
result = None
# query k8s api and process objects
obj = k8s.query_object(kind, namespace, name, query_options=query_options)
result = self._processor.process_object(k8s, obj, query_options=query_options)
self._add_to_cache(result)
return result
def _add_to_cache(self, obj):
"""
Adds the object `obj` to the cache.
"""
# update our cache if we have an obj
if obj:
global_log.log(
scalyr_logging.DEBUG_LEVEL_2,
"Processing single %s: %s/%s"
% (self._object_type, obj.namespace, obj.name),
)
self._lock.acquire()
try:
# update the object
objects = self._objects.setdefault(obj.namespace, {})
objects[obj.name] = obj
# remove expired flag
expired_dict = self._objects_expired.setdefault(obj.namespace, {})
expired_dict.pop(obj.name, None)
finally:
self._lock.release()
def _lookup_object(
self, namespace, name, current_time, allow_expired=True, query_options=None
):
""" Look to see if the object specified by the namespace and name exists within the cached data.
Note: current_time should be provided (otherwise, access_time-based revalidation of cache won't work correctly,
for example, manifesting as unnecessary re-queries of controller metadata)
Return the object info, or None if not found
@param namespace: The object's namespace
@param name: The object's name
@param current_time last access time for the object to this value.
@param allow_expired: If true, return the object if it exists in the cache even if expired.
If false, return None if the object exists but is expired.
@type namespace: six.text_type
@type name: six.text_type
@type current_time: epoch seconds
@type allow_expired: bool
"""
result = None
self._lock.acquire()
try:
# Optionally check if the object has been marked as expired. If so, return None.
if not allow_expired:
expired = self._objects_expired.setdefault(namespace, {}).get(
name, False
)
if expired:
return None
objects = self._objects.get(namespace, {})
result = objects.get(name, None)
# update access time
if result is not None and current_time is not None:
result.access_time = current_time
finally:
self._lock.release()
return result
def is_cached(self, namespace, name, allow_expired):
"""Returns true if the specified object is in the cache and (optionally) not expired.
@param namespace: The object's namespace
@param name: The object's name
@param allow_expired: If True, an object is considered present in cache even if it is expired.
@type namespace: six.text_type
@type name: six.text_type
@type allow_expired: bool
@return: True if the object is cached. If check_expiration is True and an expiration
time exists for the object, then return True only if not expired
@rtype: bool
"""
# TODO: Look at passing down a consistent time from layers above
return (
self._lookup_object(
namespace, name, time.time(), allow_expired=allow_expired
)
is not None
)
def lookup(
self,
k8s,
current_time,
namespace,
name,
kind=None,
allow_expired=True,
query_options=None,
ignore_k8s_api_exception=False,
):
"""Returns info for the object specified by namespace and name or None if no object is found in the cache.
Querying the information is thread-safe, but the returned object should not be written to.
This method will propagate upwards K8SApiExceptions generated by k8s api response failure.
It is the responsibility of the caller to handle exceptions.
@param allow_expired: If True, an object is considered present in cache even if it is expired.
@type allow_expired: bool
"""
if kind is None:
kind = self._object_type
# see if the object exists in the cache and return it if so
result = self._lookup_object(
namespace,
name,
current_time,
allow_expired=allow_expired,
query_options=None,
)
if result:
global_log.log(
scalyr_logging.DEBUG_LEVEL_2,
"cache hit for %s %s/%s" % (kind, namespace, name),
)
return result
# we have a cache miss so query the object individually
global_log.log(
scalyr_logging.DEBUG_LEVEL_2,
"cache miss for %s %s/%s" % (kind, namespace, name),
)
result = None
try:
result = self._update_object(
k8s, kind, namespace, name, current_time, query_options=query_options
)
except K8sApiException:
if ignore_k8s_api_exception:
pass
else:
raise
return result
class _K8sProcessor(object):
"""
An abstract interface used by _K8sCache for querying a specific type of
object from the k8s api, and generating python objects from the queried result JSON.
"""
def _get_managing_controller(self, items):
"""
Processes a list of items, searching to see if one of them
is a 'managing controller', which is determined by the 'controller' field
@param items: an array containing 'ownerReferences' metadata for an object
returned from the k8s api
@return: A dict containing the managing controller of type `kind` or None if no such controller exists
"""
for i in items:
controller = i.get("controller", False)
if controller:
return i
return None
def process_object(self, k8s, obj, query_options=None):
"""
Creates a python object based of a dict
@param k8s: a KubernetesApi object
@param obj: A JSON dict returned as a response to querying
the k8s API for a specific object type.
@return a python object relevant to the
"""
raise NotImplementedError("process_object not implemented for _K8sProcessor")
class PodProcessor(_K8sProcessor):
def __init__(self, controllers):
super(PodProcessor, self).__init__()
self._controllers = controllers
def _get_controller_from_owners(self, k8s, owners, namespace, query_options=None):
"""
Processes a list of owner references returned from a Pod's metadata to see
if it is eventually owned by a Controller, and if so, returns the Controller object
@return Controller - a Controller object
"""
controller = None
# check if we are owned by another controller
owner = self._get_managing_controller(owners)
if owner is None:
return None
# make sure owner has a name field and a kind field
name = owner.get("name", None)
if name is None:
return None
kind = owner.get("kind", None)
if kind is None:
return None
# walk the parent until we get to the root controller
# Note: Parent controllers will always be in the same namespace as the child
current_time = time.time()
controller = self._controllers.lookup(
k8s,
current_time,
namespace,
name,
kind=kind,
query_options=query_options,
ignore_k8s_api_exception=True,
)
while controller:
if controller.parent_name is None:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"controller %s has no parent name" % controller.name,
)
break
if controller.parent_kind is None:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"controller %s has no parent kind" % controller.name,
)
break
# get the parent controller
parent_controller = self._controllers.lookup(
k8s,
current_time,
namespace,
controller.parent_name,
kind=controller.parent_kind,
query_options=query_options,
)
# if the parent controller doesn't exist, assume the current controller
# is the root controller
if parent_controller is None:
break
# walk up the chain
controller = parent_controller
return controller
def process_object(self, k8s, obj, query_options=None):
""" Generate a PodInfo object from a JSON object
@param k8s: a KubernetesApi object
@param pod: The JSON object returned as a response to querying
a specific pod from the k8s API
@return A PodInfo object
"""
result = {}
metadata = obj.get("metadata", {})
spec = obj.get("spec", {})
labels = metadata.get("labels", {})
annotations = metadata.get("annotations", {})
owners = metadata.get("ownerReferences", [])
pod_name = metadata.get("name", "")
namespace = metadata.get("namespace", "")
controller = self._get_controller_from_owners(
k8s, owners, namespace, query_options=query_options
)
container_names = []
for container in spec.get("containers", []):
container_names.append(container.get("name", "invalid-container-name"))
try:
annotations = annotation_config.process_annotations(annotations)
except BadAnnotationConfig as e:
global_log.warning(
"Bad Annotation config for %s/%s. All annotations ignored. %s"
% (namespace, pod_name, six.text_type(e)),
limit_once_per_x_secs=300,
limit_key="bad-annotation-config-%s"
% metadata.get("uid", "invalid-uid"),
)
annotations = JsonObject()
global_log.log(
scalyr_logging.DEBUG_LEVEL_2,
"Annotations: %s" % (six.text_type(annotations)),
)
# create the PodInfo
result = PodInfo(
name=pod_name,
namespace=namespace,
uid=metadata.get("uid", ""),
node_name=spec.get("nodeName", ""),
labels=labels,
container_names=container_names,
annotations=annotations,
controller=controller,
)
return result
class ControllerProcessor(_K8sProcessor):
def process_object(self, k8s, obj, query_options=None):
""" Generate a Controller object from a JSON object
@param k8s: a KubernetesApi object
@param obj: The JSON object returned as a response to querying
a specific controller from the k8s API
@return A Controller object
"""
metadata = obj.get("metadata", {})
kind = obj.get("kind", "")
owners = metadata.get("ownerReferences", [])
namespace = metadata.get("namespace", "")
name = metadata.get("name", "")
labels = metadata.get("labels", {})
parent_name = None
parent_kind = None
parent = self._get_managing_controller(owners)
if parent is not None:
parent_name = parent.get("name", None)
parent_kind = parent.get("kind", None)
return Controller(name, namespace, kind, parent_name, parent_kind, labels)
class _CacheConfig(object):
"""
Internal configuration options for the Kubernetes cache
"""
def __init__(
self,
api_url="https://kubernetes.default",
verify_api_queries=True,
cache_expiry_secs=30,
cache_purge_secs=300,
cache_expiry_fuzz_secs=0,
cache_start_fuzz_secs=0,
query_timeout=20,
global_config=None,
):
"""
@param api_url: the url for querying the k8s api
@param verify_api_queries: whether to verify queries to the k8s api
@param cache_expiry_secs: the number of secs to wait before updating the cache
@param cache_expiry_fuzz_secs: if greater than zero, the number of seconds to fuzz the expiration time to avoid query stampede
@param cache_start_fuzz_secs: if greater than zero, the number of seconds to fuzz the start time to avoid query stampede
@param cache_purge_secs: the number of seconds to wait before purging old controllers from the cache
@param query_timeout: The number of seconds to wait before a query to the API server times out
@param global_config: Global configuration object
@type api_url: str
@type verify_api_queries: bool
@type cache_expiry_secs: int or float
@type cache_expiry_fuzz_secs: int or float
@type cache_start_fuzz_secs: int or float
@type cache_purge_secs: int or float
@type query_timeout: int
@type global_config: Configurtion
"""
# NOTE: current implementations of __eq__ expects that fields set in contructor are only true state
# fields that affect equality. If this is ever changed, be sure to modify __eq__ accordingly
self.api_url = api_url
self.verify_api_queries = verify_api_queries
self.cache_expiry_secs = cache_expiry_secs
self.cache_expiry_fuzz_secs = cache_expiry_fuzz_secs
self.cache_start_fuzz_secs = cache_start_fuzz_secs
self.cache_purge_secs = cache_purge_secs
self.query_timeout = query_timeout
self.global_config = global_config
def __eq__(self, other):
"""Equivalence method for _CacheConfig objects so == testing works """
for key, val in self.__dict__.items():
if val != getattr(other, key):
return False
return True
def __ne__(self, other):
"""Non-Equivalence method for _CacheConfig objects because Python 2 doesn't
automatically generate != if == is defined
"""
# return result based on negation of `==` rather than negation of `__eq__`
return not (self == other)
def __repr__(self):
s = ""
for key, val in self.__dict__.items():
s += "\n\t%s: %s" % (key, val)
return s + "\n"
def need_new_k8s_object(self, new_config):
"""
Determines if a new KubernetesApi object needs to created for the cache based on the new config
@param new_config: The new config options
@type new_config: _CacheConfig
@return: True if a new KubernetesApi object should be created based on the differences between the
current and the new config. False otherwise.
"""
relevant_fields = ["api_url", "verify_api_queries", "query_timeout"]
relevant_global_config_fields = [
"agent_log_path",
"k8s_log_api_responses",
"k8s_log_api_exclude_200s",
"k8s_log_api_min_response_len",
"k8s_log_api_min_latency",
"k8s_log_api_ratelimit_interval",
]
# Verify the relevant CacheConfig fields are equal
for field in relevant_fields:
if getattr(self, field) != getattr(new_config, field):
return True
# If one of the sub global-config is null and the other isn't, return True
if bool(self.global_config) ^ bool(new_config.global_config):
return True
# If both sub global-configs are present, verify their relevant fields are equal
if self.global_config and new_config:
for field in relevant_global_config_fields:
if getattr(self.global_config, field) != getattr(
new_config.global_config, field
):
return True
return False
class _CacheConfigState(object):
"""
Class holding cache config related state
"""
class LocalState(object):
"""
Helper class containing copies of state information so that it can be used on
separate threads without worry of being changed by another thread.
"""
def __init__(self, state):
"""
Create a copy of the relevant parts of `state`
The caller should lock `state` before calling this method
@param state: the cache state
@type state: _CacheConfigState
"""
self.k8s = state.k8s
self.cache_expiry_secs = state.cache_config.cache_expiry_secs
self.cache_purge_secs = state.cache_config.cache_purge_secs
self.cache_expiry_fuzz_secs = state.cache_config.cache_expiry_fuzz_secs
self.cache_start_fuzz_secs = state.cache_config.cache_start_fuzz_secs
def __init__(self, cache_config, global_config):
"""Set default values"""
self._lock = threading.Lock()
self.k8s = None
self.cache_config = _CacheConfig(api_url="", global_config=global_config)
self._pending_config = None
self.configure(cache_config)
def copy_state(self):
"""
Get a copy of the relevant cache state in a thread-safe manner
@return: a copy of various state information, useful for the main processing thread
@rtype: LocalState
"""
self._lock.acquire()
try:
return self.LocalState(self)
finally:
self._lock.release()
def configure(self, new_cache_config):
"""
Configures the state based on any changes in the configuration.
Whenever a new configuration is detected, a new instance of KubernetesApi will be created.
The KubernetesApi however, will reference a named BlockingRateLimiter since we need to share
BlockingRateLimiters across monitors.
@param new_cache_config: the new configuration
@param global_config: global configuration object
@type new_cache_config: _CacheConfig
@type global_config: Configuration object
"""
# get old state values
old_state = self.copy_state()
need_new_k8s = False
self._lock.acquire()
try:
if self.cache_config == new_cache_config:
return
self._pending_config = new_cache_config
need_new_k8s = (
old_state.k8s is None
or self.cache_config.need_new_k8s_object(new_cache_config)
)
finally:
self._lock.release()
# create a new k8s api object if we need one
k8s = old_state.k8s
if need_new_k8s:
k8s = KubernetesApi.create_instance(
new_cache_config.global_config,
k8s_api_url=new_cache_config.api_url,
query_timeout=new_cache_config.query_timeout,
verify_api_queries=new_cache_config.verify_api_queries,
)
# update with new values
self._lock.acquire()
try:
# if new_config is not self._pending_config then it means a newer config
# came through on another thread before we finished this call and therefore
# we should avoid updating because we only want the most recent update to succeed.
# use 'is' rather than == because we want to see if they are the same object
# not if the objects are semantically identical
if new_cache_config is self._pending_config:
self.k8s = k8s
self.k8s.query_timeout = new_cache_config.query_timeout
self.cache_config = new_cache_config
self._pending_config = None
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"Got new config %s",
six.text_type(self.cache_config),
)
finally:
self._lock.release()
class AgentPodNotReadyException(Exception):
"""Raised when the agent pod is not fully ready according to the K8s API server."""
pass
class KubernetesCache(object):
def __init__(
self,
api_url="https://kubernetes.default",
verify_api_queries=True,
cache_expiry_secs=30,
cache_expiry_fuzz_secs=0,
cache_start_fuzz_secs=0,
cache_purge_secs=300,
start_caching=True,
global_config=None,
):
self._lock = threading.Lock()
new_cache_config = _CacheConfig(
api_url=api_url,
verify_api_queries=verify_api_queries,
cache_expiry_secs=cache_expiry_secs,
cache_expiry_fuzz_secs=cache_expiry_fuzz_secs,
cache_start_fuzz_secs=cache_start_fuzz_secs,
cache_purge_secs=cache_purge_secs,
global_config=global_config,
)
# set the initial state
self._state = _CacheConfigState(new_cache_config, global_config)
# create the controller cache
self._controller_processor = ControllerProcessor()
self._controllers = _K8sCache(self._controller_processor, "<controller>")
# create the pod cache
self._pod_processor = PodProcessor(self._controllers)
self._pods_cache = _K8sCache(self._pod_processor, "Pod")
self._cluster_name = None
self._api_server_version = None
# The last time (in seconds since epoch) we updated the K8s version number via a query
self._last_api_server_version_update = 0
self._container_runtime = None
self._initialized = False
self._last_initialization_error = "Initialization not started"
self._thread = None
self._rate_limiter = None
if start_caching:
self.start()
def stop(self):
"""Stops the cache, specifically stopping the background thread that refreshes the cache"""
self._thread.stop()
def start(self):
"""
Starts the background thread that reads from the k8s cache
"""
if self._thread is None:
self._thread = StoppableThread(target=self.update_cache, name="K8S Cache")
self._thread.start()
def local_state(self):
"""
Returns a local copy of the current state
"""
return self._state.copy_state()
def update_config(self, new_cache_config):
"""
Updates the cache config
"""
self._state.configure(new_cache_config)
self._lock.acquire()
try:
if self._thread is None:
self.start()
finally:
self._lock.release()
def is_initialized(self):
"""Returns whether or not the k8s cache has been initialized with the full pod list"""
result = False
self._lock.acquire()
try:
result = self._initialized
finally:
self._lock.release()
return result
def last_initialization_error(self):
"""Returns the last error experienced while initializing the cache.
Returns None if it is initialized."""
self._lock.acquire()
try:
if self._initialized:
return None
else:
return self._last_initialization_error
finally:
self._lock.release()
def _update_initialization_error(self, component, message):
"""Updates the last initialization error message experienced.
:param component: The component of the cache being initialized when the error occurred
:param message: The error message
:type component: six.text_type
:type message: six.text_type
"""
self._lock.acquire()
try:
self._last_initialization_error = (
'Unable to initialize %s in K8s cache due to "%s"'
% (component, message)
)
finally:
self._lock.release()
def _update_cluster_name(self, k8s):
"""Updates the cluster name"""
cluster_name = k8s.get_cluster_name()
self._lock.acquire()
try:
self._cluster_name = cluster_name
finally:
self._lock.release()
def _update_api_server_version_if_necessary(self, k8s, current_time=None):
"""Update the API server version if it has not been successfully update in the last hour.
The version number is determined by querying the K8s API server."""
if current_time is None:
current_time = time.time()
# Check if we have the version set and what time we set it.
self._lock.acquire()
try:
is_version_set = self._api_server_version is not None
last_check_time = self._last_api_server_version_update
finally:
self._lock.release()
# We only update if we haven't updated it in the last hour.
if not is_version_set or current_time - last_check_time > 3600:
# Query the API server to get version.
gitver = k8s.get_api_server_version()
self._lock.acquire()
try:
self._api_server_version = gitver
self._last_api_server_version_update = current_time
finally:
self._lock.release()
def _get_runtime(self, k8s):
pod_name = k8s.get_pod_name()
pod = k8s.query_pod(k8s.namespace, pod_name)
if pod is None:
global_log.warning(
"Coud not determine K8s CRI because could not find agent pod: %s"
% pod_name,
limit_once_per_x_secs=300,
limit_key="k8s_cri_no_pod",
)
return None
status = pod.get("status", {})
containers = status.get("containerStatuses", [])
for container in containers:
name = container.get("name")
if name and name == "scalyr-agent":
# If the agent container is not ready (according the API server) we cannot get the containerID
# and therefore cannot determine the container runtime. We need to wait a little bit for the
# API server to catch up. We raise this exception which triggers the right things.
if not container.get("ready", False):
raise AgentPodNotReadyException()
containerId = container.get("containerID", "")
m = _CID_RE.match(containerId)
if m:
return m.group(1)
else:
global_log.warning(
"Coud not determine K8s CRI because agent container id did not match: %s"
% containerId,
limit_once_per_x_secs=300,
limit_key="k8s_cri_unmatched_container_id",
)
return None
global_log.warning(
"Coud not determine K8s CRI because could not find agent container in pod.",
limit_once_per_x_secs=300,
limit_key="k8s_cri_no_agent_container",
)
return None
def update_cache(self, run_state):
"""
Main thread for updating the k8s cache
"""
start_time = time.time()
retry_delay_secs = None
while run_state.is_running() and not self.is_initialized():
# get cache state values that will be consistent for the duration of the loop iteration
local_state = self._state.copy_state()
# Delay the start of this cache if we have fuzzing turned on. This will reduce the stampede of
# agents all querying the API master at the same time on large clusters (when the agents are started
# at the same time.)
if local_state.cache_start_fuzz_secs > 0:
run_state.sleep_but_awaken_if_stopped(
random.uniform(0, local_state.cache_start_fuzz_secs)
)
# Delay before reattempting to initialize the cache if we had an error last time.
if retry_delay_secs is not None:
run_state.sleep_but_awaken_if_stopped(retry_delay_secs)
if not run_state.is_running() or self.is_initialized():
continue
# Records which component is being initialized. Used in error messages below.
component = "cluster name"
try:
self._update_cluster_name(local_state.k8s)
component = "api version"
self._update_api_server_version_if_necessary(local_state.k8s)
component = "runtime"
runtime = self._get_runtime(local_state.k8s)
self._lock.acquire()
try:
self._container_runtime = runtime
self._initialized = True
self._last_initialization_error = None
finally:
self._lock.release()
except K8sApiException as e:
global_log.warn(
"K8s API exception while updating %s in K8s cache (will retry) - %s"
% (component, six.text_type(e)),
limit_once_per_x_secs=300,
limit_key="k8s_api_init_cache",
)
self._update_initialization_error(
component, "K8s API error %s" % six.text_type(e)
)
# Delay a fixed amount. TODO: Maybe do exponential backoff here?
retry_delay_secs = 0.5
except AgentPodNotReadyException:
global_log.info(
"Agent container not ready while initializing cache (will retry)",
limit_once_per_x_secs=60,
limit_key="k8s_agent_pod_not_ready",
)
self._update_initialization_error(
component, "Agent container not ready"
)
retry_delay_secs = 1.0
except Exception as e:
global_log.warn(
"Exception occurred when updating %s in K8s cache (will retry) - %s\n%s"
% (component, six.text_type(e), traceback.format_exc()),
limit_once_per_x_secs=60,
limit_key="k8s_init_generic_error",
)
self._update_initialization_error(
component, "Unhandled error %s" % six.text_type(e)
)
# Unknown error. TODO: Maybe do exponential backoff here?
retry_delay_secs = 0.5
current_time = time.time()
elapsed = current_time - start_time
global_log.info("Kubernetes cache initialized in %.2f seconds" % elapsed)
local_state = self._state.copy_state()
# go back to sleep if we haven't taken longer than the expiry time
if elapsed < local_state.cache_expiry_secs:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"sleeping for %.2f seconds" % (local_state.cache_expiry_secs - elapsed),
)
run_state.sleep_but_awaken_if_stopped(
local_state.cache_expiry_secs - elapsed
)
# start the main update loop
last_purge = time.time()
while run_state.is_running():
# get cache state values that will be consistent for the duration of the loop iteration
local_state = self._state.copy_state()
try:
current_time = time.time()
global_log.log(
scalyr_logging.DEBUG_LEVEL_1, "Marking unused pods as expired"
)
self._pods_cache.mark_as_expired(current_time)
self._update_cluster_name(local_state.k8s)
self._update_api_server_version_if_necessary(
local_state.k8s, current_time=current_time
)
if last_purge + local_state.cache_purge_secs < current_time:
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"Purging unused controllers last_purge=%s cache_purge_secs=%s current_time=%s"
% (last_purge, local_state.cache_purge_secs, current_time),
)
self._controllers.purge_unused(last_purge)
# purge any pods that haven't been queried within the cache_purge_secs
global_log.log(scalyr_logging.DEBUG_LEVEL_1, "Purging stale pods")
self._pods_cache.purge_unused(last_purge)
last_purge = current_time
except K8sApiException as e:
global_log.warn(
"Exception occurred when updating k8s cache - %s"
% (six.text_type(e)),
limit_once_per_x_secs=300,
limit_key="k8s_api_update_cache",
)
except Exception as e:
global_log.warn(
"Exception occurred when updating k8s cache - %s\n%s"
% (six.text_type(e), traceback.format_exc())
)
# Fuzz how much time we spend until the next cycle. This should spread out when the agents query the
# API master over time in clusters with a larger number of agents.
if local_state.cache_expiry_fuzz_secs > 0:
fuzz_factor = max(
random.uniform(0, local_state.cache_expiry_fuzz_secs), 0
)
else:
fuzz_factor = 0
run_state.sleep_but_awaken_if_stopped(
local_state.cache_expiry_secs - fuzz_factor
)
def pod(
self,
namespace,
name,
current_time=None,
allow_expired=True,
query_options=None,
ignore_k8s_api_exception=False,
):
"""Returns pod info for the pod specified by namespace and name or None if no pad matches.
Warning: Failure to pass current_time leads to incorrect recording of last access times, which will
lead to these objects being refreshed prematurely (potential source of bugs)
Querying the pod information is thread-safe, but the returned object should
not be written to.
@param allow_expired: If True, an object is considered present in cache even if it is expired.
@type allow_expired: bool
"""
local_state = self._state.copy_state()
if local_state.k8s is None:
return
return self._pods_cache.lookup(
local_state.k8s,
current_time,
namespace,
name,
kind="Pod",
allow_expired=allow_expired,
query_options=query_options,
ignore_k8s_api_exception=ignore_k8s_api_exception,
)
def is_pod_cached(self, namespace, name, allow_expired):
"""Returns true if the specified pod is in the cache and isn't expired.
Warning: Failure to pass current_time leads to incorrect recording of last access times, which will
lead to these objects being refreshed prematurely (potential source of bugs)
@param namespace: The pod's namespace
@param name: The pod's name
@param allow_expired: If True, an object is considered present in cache even if it is expired.
@type namespace: str
@type name: str
@type allow_expired: bool
@return: True if the pod is cached.
@rtype: bool
"""
return self._pods_cache.is_cached(namespace, name, allow_expired)
def controller(self, namespace, name, kind, current_time=None, query_options=None):
"""Returns controller info for the controller specified by namespace and name
or None if no controller matches.
Warning: Failure to pass current_time leads to incorrect recording of last access times, which will
lead to these objects being refreshed prematurely (potential source of bugs)
Querying the controller information is thread-safe, but the returned object should
not be written to.
"""
local_state = self._state.copy_state()
if local_state.k8s is None:
return
return self._controllers.lookup(
local_state.k8s,
current_time,
namespace,
name,
kind=kind,
query_options=query_options,
ignore_k8s_api_exception=True,
)
def pods_shallow_copy(self):
"""Retuns a shallow copy of the pod objects"""
return self._pods_cache.shallow_copy()
def get_cluster_name(self):
"""Returns the cluster name"""
result = None
self._lock.acquire()
try:
result = self._cluster_name
finally:
self._lock.release()
return result
def get_container_runtime(self):
"""Returns the k8s container runtime currently being used"""
result = None
self._lock.acquire()
try:
result = self._container_runtime
finally:
self._lock.release()
return result
def get_api_server_version(self):
"""Returns API server version"""
result = None
self._lock.acquire()
try:
result = self._api_server_version
finally:
self._lock.release()
return result
class KubernetesApi(object):
"""Simple wrapper class for querying the k8s api
"""
@staticmethod
def create_instance(
global_config,
k8s_api_url=None,
query_timeout=None,
verify_api_queries=None,
rate_limiter_key="K8S_CACHE_MAIN_RATELIMITER",
):
"""
@param global_config: Global configuration
@param k8s_api_url: overrides global config api url
@param query_timeout: overrides global config query timeout
@param verify_api_queries: overrides global config verify_api_queries
@param rate_limiter_key: Allow overriding of rate limiter key, otherwise, uses the "main" k8s cache ratelimiter
"""
if k8s_api_url is None:
k8s_api_url = global_config.k8s_api_url
if query_timeout is None:
query_timeout = global_config.k8s_cache_query_timeout_secs
if verify_api_queries is None:
verify_api_queries = global_config.k8s_verify_api_queries
kwargs = {
"k8s_api_url": k8s_api_url,
"query_timeout": query_timeout,
}
if not verify_api_queries:
kwargs["ca_file"] = None
elif global_config:
kwargs["ca_file"] = global_config.k8s_service_account_cert
if global_config:
kwargs.update(
{
"log_api_responses": global_config.k8s_log_api_responses,
"log_api_exclude_200s": global_config.k8s_log_api_exclude_200s,
"log_api_min_response_len": global_config.k8s_log_api_min_response_len,
"log_api_min_latency": global_config.k8s_log_api_min_latency,
"log_api_ratelimit_interval": global_config.k8s_log_api_ratelimit_interval,
"agent_log_path": global_config.agent_log_path,
"query_options_max_retries": global_config.k8s_controlled_warmer_max_query_retries,
"rate_limiter": BlockingRateLimiter.get_instance(
rate_limiter_key, global_config, logger=global_log
),
"token_file": global_config.k8s_service_account_token,
"namespace_file": global_config.k8s_service_account_namespace,
}
)
return KubernetesApi(**kwargs)
def __init__(
self,
ca_file="/run/secrets/kubernetes.io/serviceaccount/ca.crt",
k8s_api_url="https://kubernetes.default",
query_timeout=20,
log_api_responses=False,
log_api_exclude_200s=False,
log_api_min_response_len=False,
log_api_min_latency=0.0,
log_api_ratelimit_interval=300,
agent_log_path=None,
query_options_max_retries=3,
rate_limiter=None,
token_file="/var/run/secrets/kubernetes.io/serviceaccount/token",
namespace_file="/var/run/secrets/kubernetes.io/serviceaccount/namespace",
):
"""Init the kubernetes object"""
self.log_api_responses = log_api_responses
self.log_api_exclude_200s = log_api_exclude_200s
self.log_api_min_response_len = log_api_min_response_len
self.log_api_min_latency = log_api_min_latency
self.log_api_ratelimit_interval = log_api_ratelimit_interval
self.agent_log_path = agent_log_path
self._http_host = k8s_api_url
global_log.log(
scalyr_logging.DEBUG_LEVEL_1, "Kubernetes API host: %s", self._http_host
)
self.query_timeout = query_timeout
self._session = None
self._ca_file = ca_file
# We create a few headers ahead of time so that we don't have to recreate them each time we need them.
self._standard_headers = {
"Connection": "Keep-Alive",
"Accept": "application/json",
}
# The k8s API requires us to pass in an authentication token
# which we can obtain from a token file in a 'well known' location
self.token = ""
try:
# using with is ok here, because we need to be running
# a recent version of python for various 3rd party libs
f = open(token_file, "r")
try:
self.token = f.read()
finally:
f.close()
except IOError:
pass
# get the namespace this pod is running on
self.namespace = "default"
try:
# using with is ok here, because we need to be running
# a recent version of python for various 3rd party libs
f = open(namespace_file, "r")
try:
self.namespace = f.read()
finally:
f.close()
except IOError:
pass
self._standard_headers["Authorization"] = "Bearer %s" % (self.token)
# A rate limiter should normally be passed unless no rate limiting is desired.
self._query_options_max_retries = query_options_max_retries
self._rate_limiter = rate_limiter
@property
def default_query_options(self):
if not self._rate_limiter:
return None
return ApiQueryOptions(
max_retries=self._query_options_max_retries, rate_limiter=self._rate_limiter
)
def _verify_connection(self):
""" Return whether or not to use SSL verification
"""
if self._ca_file:
return self._ca_file
return False
def _ensure_session(self):
"""Create the session if it doesn't exist, otherwise do nothing
"""
if not self._session:
self._session = requests.Session()
self._session.headers.update(self._standard_headers)
def get_pod_name(self):
""" Gets the pod name of the pod running the scalyr-agent """
# 2->TODO in python2 os.environ returns 'str' type. Convert it to unicode.
return os_environ_unicode.get("SCALYR_K8S_POD_NAME") or os_environ_unicode.get(
"HOSTNAME"
)
def get_node_name(self, pod_name):
""" Gets the node name of the node running the agent """
# 2->TODO in python2 os.environ returns 'str' type. Convert it to unicode.
node = os_environ_unicode.get("SCALYR_K8S_NODE_NAME")
if not node:
pod = self.query_pod(self.namespace, pod_name)
spec = pod.get("spec", {})
node = spec.get("nodeName")
return node
def get_api_server_version(self):
"""Get the API server version (specifically the server gitVersion)
@return: The gitVersion extracted from /version JSON
@rtype: str
"""
version_map = self.query_api_with_retries(
"/version",
retry_error_context="get_api_server_version",
retry_error_limit_key="get_api_server_version",
)
return version_map.get("gitVersion")
def get_cluster_name(self):
""" Returns the name of the cluster running this agent.
There is no way to get this from the k8s API so we check the following:
If the environment variable SCALYR_K8S_CLUSTER_NAME is set, then use that.
Otherwise query the api for the pod running the agent container and check to see
if it has an annotation: agent.config.scalyr.com/cluster_name, and if so, use that.
Otherwise return None
"""
# 2->TODO in python2 os.environ returns 'str' type. Convert it to unicode.
cluster = os_environ_unicode.get("SCALYR_K8S_CLUSTER_NAME", "")
if cluster:
return cluster
pod_name = self.get_pod_name()
pod = self.query_pod(self.namespace, pod_name)
if pod is None:
return None
metadata = pod.get("metadata", {})
annotations = metadata.get("annotations", {})
if "agent.config.scalyr.com/cluster_name" in annotations:
return annotations["agent.config.scalyr.com/cluster_name"]
return None
def query_api_with_retries(
self,
query,
query_options="not-set",
retry_error_context=None,
retry_error_limit_key=None,
):
"""Invoke query api through rate limiter with retries
@param query: Query string
@param query_options: ApiQueryOptions containing retries and rate_limiter.
Explicit None signifies no rate limiting.
Default 'not-set' signifies "use k8s-instance specific rate limiter and query options
@param retry_error_context: context object whose string representation is logged upon failure (if None)
@param retry_error_limit_key: key for limiting retry logging
@type query: str
@type query_options: ApiQueryOptions
@type retry_error_context: object
@type retry_error_limit_key: str
@return: json-decoded response of the query api call
@rtype: dict or a scalyr_agent.json_lib.objects.JsonObject
"""
if not query_options:
return self.query_api(query)
if query_options == "not-set":
query_options = self.default_query_options
retries_left = query_options.max_retries
rate_limiter = query_options.rate_limiter
while True:
t = time.time()
token = rate_limiter.acquire_token()
rate_limit_outcome = False
try:
result = self.query_api(
query,
return_temp_errors=query_options.return_temp_errors,
rate_limited=True,
)
rate_limit_outcome = True
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Rate limited k8s api query took %s seconds" % (time.time() - t),
)
return result
except K8sApiNotFoundException:
# catch and re-raise this before any other temporary errors, because we need to
# handle this one separately. Rather than immediately retrying, we won't do anything,
# rather, if the agent wants to query this endpoint again later then it will.
# This is useful for when a pod hasn't fully started up yet and querying its endpoint
# will return a 404. Then if you query again a few seconds later everything works.
rate_limit_outcome = True
raise
except K8sApiTemporaryError as e:
rate_limit_outcome = False
if retries_left <= 0:
raise e
retries_left -= 1
if retry_error_context:
global_log.warn(
"k8s API - retrying temporary error: %s" % retry_error_context,
limit_once_per_x_secs=300,
limit_key="k8s_api_retry-%s" % retry_error_limit_key,
)
finally:
# Any uncaught exceptions will result in an outcome of False
rate_limiter.release_token(token, rate_limit_outcome)
def __open_api_response_log(self, path, rate_limited):
"""Opens a file for logging the api response
The file will be located in agent_log_dir/kapi/(limited/not_limited) depending on whether the
api call is rate limited or not.
@param path: The URL path to be queried (also embedded in the filename)
@param rate_limited: Whether the response is rate limited or not
@type path: str
@type rate_limited: bool
@returns File handle to the api response log file or None upon failure.
@rtype: file handle
"""
# try to open the logged_response_file
try:
kapi = os.path.join(self.agent_log_path, "kapi")
if not os.path.exists(kapi):
os.mkdir(kapi, 0o755)
if rate_limited:
kapi = os.path.join(kapi, "limited")
else:
kapi = os.path.join(kapi, "limited")
if not os.path.exists(kapi):
os.mkdir(kapi, 0o755)
fname = "%s_%.20f_%s_%s" % (
strftime("%Y%m%d-%H-%M-%S", gmtime()),
time.time(),
random.randint(1, 100),
path.replace("/", "--"),
)
# if logging responses to disk, always prepend the stack trace for easier debugging
return open(os.path.join(kapi, fname), "w")
except IOError:
pass
def __check_for_fake_response(self, logged_response_file):
"""Helper method that checks for a well known file on disk and simulates timeouts from API master
If successfully logging responses, we can also check for a local "simfile" to simulate API master timeout.
The simfile is a textfile that contains the HTTP error code we want to simulate.
This method should only be called during development.
@param logged_response_file: Logged-response logfile
@type logged_response_file: file handle
@raises K8sApiTemporaryError: if the simfile contains one of the following http error codes that we consider
as a temporary error.
"""
fake_response_file = os.path.join(self.agent_log_path, "simfile")
if os.path.isfile(fake_response_file):
fake_response_code = None
try:
fake_f = open(fake_response_file, "r")
try:
fake_response_code = fake_f.read().strip()
finally:
fake_f.close()
except Exception:
if logged_response_file:
logged_response_file.write(
"Error encountered while attempting to fake a response code:\n%s\n\n"
% traceback.format_exc()
)
if fake_response_code in ["404", "503", "429"]:
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Faking api master temporary error (%s) for url: %s",
limit_once_per_x_secs=300,
limit_key="k8s_api_query_fake_temporary_error",
)
raise K8sApiTemporaryError("Fake %s" % fake_response_code)
def query_api(self, path, pretty=0, return_temp_errors=False, rate_limited=False):
""" Queries the k8s API at 'path', and converts OK responses to JSON objects
"""
self._ensure_session()
pretty = "pretty=%d" % pretty
if "?" in path:
pretty = "&%s" % pretty
else:
pretty = "?%s" % pretty
url = self._http_host + path + pretty
response = None
# Various state used logging of responses
log_responses = self.log_api_responses
logged_response = []
response_status_code = -1
response_len = 0
try:
# Optionally prepend stack trace into logged response
if log_responses:
limited_txt = ""
if rate_limited:
limited_txt = " (rate limited)"
logged_response.append("k8s.query_api%s: %s" % (limited_txt, path))
stack_trace = "".join(traceback.format_stack())
logged_response.append("\\n%s" % stack_trace.replace("\n", "\\n"))
# Make actual API call
latency = float("inf")
examine_latency = log_responses and self.log_api_min_latency > 0
if examine_latency:
t1 = time.time()
try:
response = self._session.get(
url, verify=self._verify_connection(), timeout=self.query_timeout
)
response.encoding = "utf-8"
except Exception as e:
if return_temp_errors:
raise K8sApiTemporaryError(
"Temporary error seen while accessing api: %s"
% six.text_type(e)
)
else:
raise
finally:
# conditionally record latency regardless of exception
if examine_latency:
latency = time.time() - t1
# No exception case: record response status code and length
response_status_code = response.status_code
if response.text:
response_len = len(response.text)
if response.status_code != 200:
if response.status_code == 401 or response.status_code == 403:
raise K8sApiAuthorizationException(
path, status_code=response.status_code
)
elif response.status_code == 404:
raise K8sApiNotFoundException(
path, status_code=response.status_code
)
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Invalid response from K8S API.\n\turl: %s\n\tstatus: %d\n\tresponse length: %d"
% (url, response.status_code, len(response.text)),
limit_once_per_x_secs=300,
limit_key="k8s_api_query",
)
if return_temp_errors:
raise K8sApiTemporaryError(
"Invalid response from Kubernetes API when querying '%s': %s"
% (path, six.text_type(response)),
status_code=response.status_code,
)
else:
raise K8sApiException(
"Invalid response from Kubernetes API when querying '%s': %s"
% (path, six.text_type(response)),
status_code=response.status_code,
)
# Optionally prepend stack trace into logged response
# newlines should become literal '\n' so that the entire response is a single line
if log_responses and response.text:
logged_response.append(response.text.replace("\n", "\\n"))
return util.json_decode(response.text)
finally:
# Only debug-log the response if all criteria (status code, latency, response len) are satisfied
if log_responses:
# Always log non-200 responses (using integer division to inspect first digit.
# For 200 responses, only log if log_api_exclude_200s is False.
if (response_status_code // 100) != 2 or not self.log_api_exclude_200s:
if (
response_len >= self.log_api_min_response_len
and latency >= self.log_api_min_latency
):
# Log the url, stacktrace and response text as a single line of text
global_log.log(
scalyr_logging.DEBUG_LEVEL_1,
"\\n\\n".join(logged_response),
limit_once_per_x_secs=self.log_api_ratelimit_interval,
limit_key="query-api-log-resp-%s"
% util.md5_hexdigest(path),
)
def query_object(self, kind, namespace, name, query_options=None):
""" Queries a single object from the k8s api based on an object kind, a namespace and a name
An empty dict is returned if the object kind is unknown, or if there is an error generating
an appropriate query string
@param kind: the kind of the object
@param namespace: the namespace to query in
@param name: the name of the object
@return - a dict returned by the query
"""
if kind not in _OBJECT_ENDPOINTS:
global_log.warn(
"k8s API - tried to query invalid object type: %s, %s, %s. Creating dummy object"
% (kind, namespace, name),
limit_once_per_x_secs=300,
limit_key="k8s_api_query-%s" % kind,
)
if kind is None:
kind = "<invalid>"
# return a dummy object with valid kind, namespace and name members
return {"kind": kind, "metadata": {"namespace": namespace, "name": name}}
query = None
try:
query = _OBJECT_ENDPOINTS[kind]["single"].substitute(
name=name, namespace=namespace
)
except Exception as e:
global_log.warn(
"k8s API - failed to build query string - %s" % (six.text_type(e)),
limit_once_per_x_secs=300,
limit_key="k8s_api_build_query-%s" % kind,
)
return {}
return self.query_api_with_retries(
query,
query_options=query_options,
retry_error_context="%s, %s, %s" % (kind, namespace, name),
retry_error_limit_key="query_object-%s" % kind,
)
def query_objects(self, kind, namespace=None, filter=None):
""" Queries a list of objects from the k8s api based on an object kind, optionally limited by
a namespace and a filter
A dict containing an empty 'items' array is returned if the object kind is unknown, or if there is an error generating
an appropriate query string
"""
if kind not in _OBJECT_ENDPOINTS:
global_log.warn(
"k8s API - tried to list invalid object type: %s, %s"
% (kind, namespace),
limit_once_per_x_secs=300,
limit_key="k8s_api_list_query-%s" % kind,
)
return {"items": []}
query = _OBJECT_ENDPOINTS[kind]["list-all"]
if namespace:
try:
query = _OBJECT_ENDPOINTS[kind]["list"].substitute(namespace=namespace)
except Exception as e:
global_log.warn(
"k8s API - failed to build namespaced query list string - %s"
% (six.text_type(e)),
limit_once_per_x_secs=300,
limit_key="k8s_api_build_list_query-%s" % kind,
)
if filter:
query = "%s?fieldSelector=%s" % (
query,
six.moves.urllib.parse.quote(filter),
)
return self.query_api_with_retries(
query,
retry_error_context="%s, %s" % (kind, namespace),
retry_error_limit_key="query_objects-%s" % kind,
)
def query_pod(self, namespace, name):
"""Convenience method for query a single pod"""
return self.query_object("Pod", namespace, name)
def query_pods(self, namespace=None, filter=None):
"""Convenience method for query a single pod"""
return self.query_objects("Pod", namespace, filter)
def query_namespaces(self):
"""Wrapper to query all namespaces"""
return self.query_api_with_retries(
"/api/v1/namespaces",
retry_error_context="query_pods",
retry_error_limit_key="query_pods",
)
def stream_events(self, path="/api/v1/watch/events", last_event=None):
"""Streams k8s events from location specified at path"""
self._ensure_session()
url = self._http_host + path
if last_event:
resource = "resourceVersion=%s" % six.text_type(last_event)
if "?" in url:
resource = "&%s" % resource
else:
resource = "?%s" % resource
url += resource
response = self._session.get(
url,
verify=self._verify_connection(),
timeout=self.query_timeout,
stream=True,
)
if response.status_code != 200:
global_log.log(
scalyr_logging.DEBUG_LEVEL_0,
"Invalid response from K8S API.\n\turl: %s\n\tstatus: %d\n\tresponse length: %d"
% (url, response.status_code, len(response.text)),
limit_once_per_x_secs=300,
limit_key="k8s_stream_events",
)
raise K8sApiException(
"Invalid response from Kubernetes API when querying %d - '%s': %s"
% (response.status_code, path, six.text_type(response)),
status_code=response.status_code,
)
for line in response.iter_lines():
if line:
yield line
class KubeletApi(object):
"""
A class for querying the kubelet API
"""
def __init__(
self,
k8s,
host_ip=None,
kubelet_url_template=Template("https://${host_ip}:10250"),
):
"""
@param k8s - a KubernetesApi object
"""
self._host_ip = host_ip
if self._host_ip is None:
try:
pod_name = k8s.get_pod_name()
pod = k8s.query_pod(k8s.namespace, pod_name)
status = pod.get("status", {})
self._host_ip = status.get("hostIP", None)
# Don't raise exception for now
# if host_ip is None:
# raise KubeletApiException( "Unable to get host IP for pod: %s/%s" % (k8s.namespace, pod_name) )
except Exception:
global_log.exception("couldn't get host ip")
pass
self._session = requests.Session()
headers = {
"Accept": "application/json",
"Authorization": "Bearer %s" % k8s.token,
}
self._session.headers.update(headers)
global_log.info("KubeletApi host ip = %s" % self._host_ip)
self._kubelet_url = self._build_kubelet_url(kubelet_url_template, host_ip)
self._fallback_kubelet_url = self._build_kubelet_url(
FALLBACK_KUBELET_URL_TEMPLATE, host_ip
)
self._timeout = 20.0
@staticmethod
def _build_kubelet_url(kubelet_url, host_ip):
if host_ip:
return kubelet_url.substitute(host_ip=host_ip)
return None
def _switch_to_fallback(self):
self._kubelet_url = self._fallback_kubelet_url
def query_api(self, path):
""" Queries the kubelet API at 'path', and converts OK responses to JSON objects
"""
while True:
url = self._kubelet_url + path
# We suppress warnings here to avoid spam about an unverified connection going to stderr.
# This method of warning suppression is not thread safe and has a small chance of suppressing warnings from
# other threads if they are emitted while this request is going.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=Warning)
response = self._session.get(url, timeout=self._timeout, verify=False)
if self._kubelet_url.startswith("https://"):
global_log.warn(
"Accessing Kubelet with an unverified HTTPS request.",
limit_once_per_x_secs=3600,
limit_key="unverified-kubelet-request",
)
response.encoding = "utf-8"
if response.status_code != 200:
if (
response.status_code == 403
and self._kubelet_url != self._fallback_kubelet_url
):
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Invalid response while querying the Kubelet API: %d. Falling back to older endpoint."
% response.status_code,
)
self._switch_to_fallback()
continue
else:
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Invalid response from Kubelet API.\n\turl: %s\n\tstatus: %d\n\tresponse length: %d"
% (url, response.status_code, len(response.text)),
limit_once_per_x_secs=300,
limit_key="kubelet_api_query",
)
raise KubeletApiException(
"Invalid response from Kubelet API when querying '%s': %s"
% (path, six.text_type(response))
)
return util.json_decode(response.text)
def query_pods(self):
return self.query_api("/pods")
def query_stats(self):
return self.query_api("/stats/summary")
class DockerMetricFetcher(object):
"""Allows for parallel fetching of container metrics from Docker. Typically, one instance of this object
will be created per monitor (Docker or Kubernetes). This current implementation relies on threads to
issue multiple `stats` requests in parallel.
This approach is necessary because the `stats` Docker command blocks for 2 seconds while it gathers
cpu measures over the interval. If we had 40 containers whose metrics we were trying to retrieve, we would
have to wait for a total of 80 seconds if we issued the `stats` request one at a time.
To get the benefit of this approach, you must first invoke `prefetch_metrics` for each container whose metrics
you wish to retrieve, and then invoke `get_metrics` to actually get the metrics.
"""
def __init__(self, docker_client, concurrency):
"""
@param docker_client: The docker client object to use for issuing `stats` requests.
@param concurrency: The maximum number of `stats` requests to issue in parallel. This controls the maximum
number of threads that will be created.
@type docker_client: k8s_test.MetricFaker
@type concurrency: int
"""
self.__docker_client = docker_client
self.__concurrency = concurrency
# A sentinel value used in the `__container_scoreboard` to indicate the container is in the queue to be fetched.
self.__PENDING = dict()
# A sentinel value used in the `__container_scoreboard` to indicate the `stats` call for a container has been
# issued but no response has been received.
self.__IN_FLIGHT = dict()
# The lock that must be held for all other state variables in this class.
self.__lock = threading.Lock()
# Records the state of requesting metrics for all containers. Maps the container name to its state or
# metric value. If the value is __PENDING, then the `stats` request for the request has not been issued.
# If it is __IN_FLIGHT, it has been requested. If it is None, an error occurred. Otherwise, the value
# is the result of the `stats` request.
self.__container_scoreboard = dict()
# Whether or not `stop` has been invoked.
self.__is_stopped = False
# The conditional variable that can be waited on to be notified of any changes to the state of this object,
# such as whether it has been stopped or if a stats results has been added in to `__container_scoreboard`.
self.__cv = threading.Condition(self.__lock)
# The number of worker threads (to perform `stats` calls) that have been created. This will always be
# less than `concurrency`.
self.__num_worker_threads = 0
# A list of containers whose metrics should be fetched. This is the same as all entries in
# `__container_scoreboard` whose value is `__PENDING`.
self.__pending_fetches = []
# The total number of containers in `__container_scoreboard` with value either `__PENDING` or `__IN_FLIGHT`.
self.__remaining_metrics_count = 0
# The number of worker threads blocked, waiting for a container to fetch its metrics.
self.__idle_workers_count = 0
def prefetch_metrics(self, container_id):
"""Initiates requesting invoking `stats` for the specified container. If you invoke this, you must
also eventually invoke `get_metrics` with the same container. By invoking this first, the `get_metrics`
call will take less time when issuing many `stats` requests.
Whenever possible, you should first invoke this method for all containers whose metrics you wish to request
before any call to `get_metrics`.
The behavior is not well defined if you invoke `prefetch_metrics` multiple times for a container before
invoking `get_metrics` for it.
@param container_id: The id of the container to fetch.
@type container_id: str
"""
self.__lock.acquire()
try:
if container_id not in self.__container_scoreboard:
self._add_fetch_task(container_id)
finally:
self.__lock.release()
def get_metrics(self, container_id):
"""Blocks until the `stats` call for the specified container is received. If `prefetch_metrics` was not
invoked already for this container, then the `stats` request will be issued.
@param container_id: The container whose metrics should be fetched.
@type container_id: str
@return The metrics for the container, or None if there was an error or if `stop` was invoked on this object.
@rtype JSON
"""
self.__lock.acquire()
try:
while True:
if self.__is_stopped:
return None
# Fetch the result if it was prefetched.
if container_id not in self.__container_scoreboard:
self._add_fetch_task(container_id)
status = self.__container_scoreboard[container_id]
if status is not self.__PENDING and status is not self.__IN_FLIGHT:
result = self.__container_scoreboard[container_id]
del self.__container_scoreboard[container_id]
return result
# Otherwise no result has been received yet.. wait..
self.__cv.wait()
finally:
self.__lock.release()
def stop(self):
"""Stops the fetcher. Any calls blocking on `get_metrics` will finish and return `None`. All threads
started by this instance will be stopped (though, this method does not wait on them to terminate).
"""
self.__lock.acquire()
try:
self.__is_stopped = True
# Notify all threads that may be waiting on a new container or waiting on a metric result that we have
# been stopped.
self.__cv.notifyAll()
finally:
self.__lock.release()
def idle_workers(self):
"""
Used for testing.
@return: The number of worker threads currently blocking, waiting for a container whose metrics need fetching.
@rtype: int
"""
self.__lock.acquire()
try:
return self.__idle_workers_count
finally:
self.__lock.release()
def _get_fetch_task(self):
"""Blocks until either there is a new container whose metrics need to be fetched or until this instance
is stopped.
@return: A tuple containing the container whose metrics should be fetched and a boolean indicating if the
instance has been stopped. If it has been stopped, the container will be None.
@rtype: (str, bool)
"""
self.__lock.acquire()
try:
while True:
if self.__is_stopped:
return None, True
if len(self.__pending_fetches) > 0:
container = self.__pending_fetches.pop(0)
self.__container_scoreboard[container] = self.__PENDING
self.__idle_workers_count -= 1
return container, False
self.__cv.wait()
finally:
self.__lock.release()
def __start_workers(self, count):
"""Start `count` worker threads that will fetch metrics results.
@param count: The number of threads to start.
@type count: int
"""
new_number_workers = min(self.__concurrency, count + self.__num_worker_threads)
for i in range(self.__num_worker_threads, new_number_workers):
x = threading.Thread(target=self.__worker)
# Set daemon so this thread does not need to be finished for the overall process to stop. This allows
# the process to terminate even if a `stats` request is still in-flight.
x.setDaemon(True)
x.start()
self.__num_worker_threads += 1
# For accounting purposes,we consider the thread idle until it actually has a container it is fetching.
self.__idle_workers_count += 1
def __worker(self):
"""The body for the worker threads.
"""
while True:
# Get the next container to fetch if there is one.
container_id, is_stopped = self._get_fetch_task()
if is_stopped:
return
result = None
try:
global_log.log(
scalyr_logging.DEBUG_LEVEL_3,
"Attempting to retrieve metrics for cid=%s" % container_id,
)
result = self.__docker_client.stats(
container=container_id, stream=False
)
except Exception as e:
global_log.error(
"Error readings stats for '%s': %s\n%s"
% (container_id, six.text_type(e), traceback.format_exc()),
limit_once_per_x_secs=300,
limit_key="api-stats-%s" % container_id,
)
self._record_fetch_result(container_id, result)
def _add_fetch_task(self, container_id):
"""Adds the specified container to the list of containers whose metrics will be fetched. Eventually, a worker
thread will grab this container and fetch its metrics.
IMPORTANT: callers must hold `__lock` when invoking this method.
@param container_id: The container whose metrics should be fetched.
@type container_id: str
"""
self.__remaining_metrics_count += 1
self.__container_scoreboard[container_id] = self.__PENDING
self.__pending_fetches.append(container_id)
# Notify any worker threads waiting for a container.
self.__cv.notifyAll()
# We need to spin up new worker threads if the amount of remaining metrics (PENDING or IN-FLIGHT) is greater
# than the number of threads we already have.
if (
self.__remaining_metrics_count > self.__num_worker_threads
and self.__num_worker_threads < self.__concurrency
):
self.__start_workers(
self.__remaining_metrics_count - self.__num_worker_threads
)
def _record_fetch_result(self, container_id, result):
"""Record that the `stats` result for the specified container. If there was an error, result should be
None.
@type container_id: str
@type result: JsonObject
"""
self.__lock.acquire()
try:
self.__container_scoreboard[container_id] = result
self.__remaining_metrics_count -= 1
# Since this is only invoked by a worker once their stats call is done, we know they are now idle.
self.__idle_workers_count += 1
# Wake up any thread that was waiting on this result.
self.__cv.notifyAll()
finally:
self.__lock.release()
def _create_k8s_cache():
"""
creates a new k8s cache object
"""
return KubernetesCache(start_caching=False)
# global cache object - the module loading system guarantees this is only ever
# initialized once, regardless of how many modules import k8s.py
_k8s_cache = _create_k8s_cache()
|
# -*- coding: utf-8 -*-
"""
<author>
<Date>
Functions to fetch (historic, current or prognosed) weather data from wunderground weather stations.
Required: API key, to be added to opengrid.cfg.
"""
# import libraries specific to wunderground script
import datetime
import pytz
import pandas as pd
import pdb
import urllib2
import json
from pprint import pprint
import time
#Script to obtain CURRENT weather detail readings
def fetch_curr_conditions(apikey, city, prop='temp_c'):
'''
Generic function for getting current weather conditions in a specified city.
Parameters
----------
* apikey : String
Wunderground API key (should be obtained after subscription)
* city : String
location of weather station
* prop : String
Type of weather property to look up.
Author: Ryton & comments from Saroele
Output: double return with value, date/time. If an error occurs, the json response will be printed.
'''
#
URL = ''.join(['http://api.wunderground.com/api/',apikey,'/geolookup/conditions/q/EU/',city,'.json'])
f = urllib2.urlopen(URL)
json_string = f.read()
parsed_json = json.loads(json_string)
#pprint(parsed_json)
try:
location = parsed_json['location']['city']
curr_value = float(parsed_json['current_observation'][prop] )
currdate = datetime.date.today()
except:
pprint(parsed_json)
raise
f.close()
return curr_value, currdate
def Details_Temp_Xdaysago(key,city,x_days_ago = 5,prop = "temp_c",PDcolumnname ='T_out'):
#get more subhourly details for a single day, eg. day 5
x_days_ago_date = datetime.datetime.today()- datetime.timedelta(days=x_days_ago)
Temp_values = Fetch_historic_temp_ByDate(key,city,x_days_ago_date,prop,PDcolumnname)
return Temp_values
#some scripts for HISTORIC data fetching
def Fetch_historic_tempYMD(key,city,year,month,day, prop = 'tempm',PDcolumnname= 'T_out' ):
# example URL: http://api.wunderground.com/api/<key>/history_20140808/q/EU/Leuven.json
# get temp df using year month day.
d = datetime.datetime(year,month,day,0,0)
datestr = '{:%Y%m%d}'.format(d)
URL = ''.join(['http://api.wunderground.com/api/',key,'/history_',datestr,'/q/BE/',city,'.json'])
f = urllib2.urlopen(URL)
json_string = f.read()
parsed_json = json.loads(json_string)
#print json_string
#pprint(parsed_json["history"]['dailysummary'])
hr = "hour"
minu = "min"
time_list=[]
temp_c_list= []
for entry in parsed_json["history"]["observations"]:
#pprint(entry)
hour_value = entry["date"][hr]
min_value = entry["date"][minu]
temp_c = entry[prop]
temp_c_list.append(float(temp_c))
concattime = datetime.datetime(year,month, day,int(hour_value),int(min_value))
time_list.append(concattime)
#print concattime, temp_c
# print shape(temp_c_list), shape(time_list)
Tout_h = pd.DataFrame(temp_c_list,columns = [PDcolumnname],index = time_list)
return Tout_h
f.close()
def Fetch_historic_temp_ByDate(key,city,date_object, prop = 'tempm',PDcolumnname= 'T_out' ):
#same as above, but with dateobject as input. hour / min are ignored
year = date_object.year
month = date_object.month
day = date_object.day
Tout_h = Fetch_historic_tempYMD(key,city,year,month,day, prop ,PDcolumnname)
return Tout_h
#scripts for Historic day-AVERAGE weather fetching
def Fetch_historic_dayaverage(key,city,year,month,day,prop = "meantempm",PDcolumnname ='T_out'):
# example URL: http://api.wunderground.com/api/<key>/history_20140808/q/EU/Leuven.json
# get temp df using year month day.
#city = 'Geel'
d = datetime.datetime(year,month,day,0,0)
datestr = '{:%Y%m%d}'.format(d)
URL = ''.join(['http://api.wunderground.com/api/',key,'/history_',datestr,'/q/BE/',city,'.json'])
f = urllib2.urlopen(URL)
json_string = f.read()
parsed_json = json.loads(json_string)
#print json_string
#pprint(parsed_json["history"]['dailysummary'])
hr = "hour"
minu = "min"
time_list=[]
temp_c_list= []
for entry in parsed_json["history"]['dailysummary']:
#"pprint(entry)
hour_value = entry["date"][hr]
min_value = entry["date"][minu]
temp_c = entry[prop]
temp_c_list.append(temp_c)
concattime = datetime.datetime(year,month, day,int(hour_value),int(min_value))
time_list.append(concattime)
#print concattime, temp_c
#Tout_h = pd.DataFrame([temp_c_list],columns = ['T_out'],index = time_list)
#parse_dates=['Date'], dayfirst=True, index_col='Date'
Tout_h = pd.DataFrame([temp_c_list],columns = [PDcolumnname], index=time_list)
#
#Tout_h_T.set_index(time_list)
f.close()
return Tout_h
def Fetch_historic_dayaverage_By_date(key,city,date_object,prop = 'meantempm',PDcolumnname ='T_out'):
#same as above, but with dateobject as input. hour / min are ignored
year = date_object.year
month = date_object.month
day = date_object.day
Tout_h = Fetch_historic_dayaverage(key,city,year,month,day,prop,PDcolumnname);
return Tout_h
def Average_Temp_Xdaysago(key,city,x_days_ago = 5,prop = 'meantempm',PDcolumnname ='T_out'):
#get more subhourly details for a single day, eg. day 5
x_days_ago_date = datetime.datetime.today()- datetime.timedelta(days=x_days_ago)
Temp_values = Fetch_historic_dayaverage_By_date(key,city,x_days_ago_date,prop,PDcolumnname)
return Temp_values
Name changes according to pep8 #26
# -*- coding: utf-8 -*-
"""
<author>
<Date>
Functions to fetch (historic, current or prognosed) weather data from wunderground weather stations.
Required: API key, to be added to opengrid.cfg.
"""
# import libraries specific to wunderground script
import datetime
import pytz
import pandas as pd
import pdb
import urllib2
import json
from pprint import pprint
import time
#Script to obtain CURRENT weather detail readings
def fetch_curr_conditions(apikey, city, prop='temp_c'):
'''
Generic function for getting current weather conditions in a specified city.
Parameters
----------
* apikey : String
Wunderground API key (can be obtained after subscription)
* city : String
location of weather station
* prop : String
Type of weather property to look up.
Returns
-------
Author: Ryton & comments from Saroele
Output: double return with value, date/time. If an error occurs, the json response will be printed.
'''
#
URL = ''.join(['http://api.wunderground.com/api/',apikey,'/geolookup/conditions/q/EU/',city,'.json'])
f = urllib2.urlopen(URL)
json_string = f.read()
parsed_json = json.loads(json_string)
#pprint(parsed_json)
try:
location = parsed_json['location']['city']
curr_value = float(parsed_json['current_observation'][prop] )
currdate = datetime.date.today()
except:
pprint(parsed_json)
raise
f.close()
return curr_value, currdate
def details_xdaysago(key,city,x_days_ago = 5,prop = "temp_c",columnname ='T_out'):
"""
Get more subhourly details for a single day, x days ago
todo: complete docstring
"""
x_days_ago_date = datetime.datetime.today()- datetime.timedelta(days=x_days_ago)
temp_values = fetch_historic_temp_bydate(key,city,x_days_ago_date,prop,columnname)
return temp_values
#some scripts for HISTORIC data fetching
def fetch_historic_tempYMD(key,city,year,month,day, prop = 'tempm',columnname= 'T_out' ):
"""
Get temp df using year month day.
Example URL: http://api.wunderground.com/api/<key>/history_20140808/q/EU/Leuven.json
todo: complete docstring
"""
d = datetime.datetime(year,month,day,0,0)
datestr = '{:%Y%m%d}'.format(d)
URL = ''.join(['http://api.wunderground.com/api/',key,'/history_',datestr,'/q/BE/',city,'.json'])
f = urllib2.urlopen(URL)
json_string = f.read()
parsed_json = json.loads(json_string)
#print json_string
#pprint(parsed_json["history"]['dailysummary'])
hr = "hour"
minu = "min"
time_list=[]
temp_c_list= []
for entry in parsed_json["history"]["observations"]:
#pprint(entry)
hour_value = entry["date"][hr]
min_value = entry["date"][minu]
temp_c = entry[prop]
temp_c_list.append(float(temp_c))
concattime = datetime.datetime(year,month, day,int(hour_value),int(min_value))
time_list.append(concattime)
#print concattime, temp_c
# print shape(temp_c_list), shape(time_list)
Tout_h = pd.DataFrame(temp_c_list,columns = [columnname],index = time_list)
return Tout_h
f.close()
def fetch_historic_temp_bydate(key,city,date_object, prop = 'tempm',columnname= 'T_out' ):
#same as above, but with dateobject as input. hour / min are ignored
year = date_object.year
month = date_object.month
day = date_object.day
Tout_h = fetch_historic_tempYMD(key,city,year,month,day, prop ,columnname)
return Tout_h
#scripts for Historic day-AVERAGE weather fetching
def fetch_historic_dayaverage(key,city,year,month,day,prop = "meantempm",columnname ='T_out'):
# example URL: http://api.wunderground.com/api/<key>/history_20140808/q/EU/Leuven.json
# get temp df using year month day.
#city = 'Geel'
d = datetime.datetime(year,month,day,0,0)
datestr = '{:%Y%m%d}'.format(d)
URL = ''.join(['http://api.wunderground.com/api/',key,'/history_',datestr,'/q/BE/',city,'.json'])
f = urllib2.urlopen(URL)
json_string = f.read()
parsed_json = json.loads(json_string)
#print json_string
#pprint(parsed_json["history"]['dailysummary'])
hr = "hour"
minu = "min"
time_list=[]
temp_c_list= []
for entry in parsed_json["history"]['dailysummary']:
#"pprint(entry)
hour_value = entry["date"][hr]
min_value = entry["date"][minu]
temp_c = entry[prop]
temp_c_list.append(temp_c)
concattime = datetime.datetime(year,month, day,int(hour_value),int(min_value))
time_list.append(concattime)
#print concattime, temp_c
#Tout_h = pd.DataFrame([temp_c_list],columns = ['T_out'],index = time_list)
#parse_dates=['Date'], dayfirst=True, index_col='Date'
Tout_h = pd.DataFrame([temp_c_list],columns = [columnname], index=time_list)
#
#Tout_h_T.set_index(time_list)
f.close()
return Tout_h
def fetch_historic_dayaverage_by_date(key,city,date_object,prop = 'meantempm',columnname ='T_out'):
#same as above, but with dateobject as input. hour / min are ignored
year = date_object.year
month = date_object.month
day = date_object.day
Tout_h = fetch_historic_dayaverage(key,city,year,month,day,prop,columnname);
return Tout_h
def average_temp_xdaysago(key,city,x_days_ago = 5,prop = 'meantempm',columnname ='T_out'):
#get more subhourly details for a single day, eg. day 5
x_days_ago_date = datetime.datetime.today()- datetime.timedelta(days=x_days_ago)
temp_values = fetch_historic_dayaverage_by_date(key,city,x_days_ago_date,prop,columnname)
return temp_values
|
# Author: F. Alex Wolf (http://falexwolf.de)
"""Data Graph
Represent a data matrix as a weighted graph of nearest neighbor relations
(edges) among data points (nodes).
"""
import numpy as np
import scipy as sp
import scipy.spatial
import scipy.sparse
from scipy.sparse import issparse
from joblib import Parallel, delayed
from ..cython import utils_cy
from .. import settings as sett
from .. import logging as logg
from .. import utils
def add_or_update_graph_in_adata(
adata,
n_neighbors=30,
n_pcs=50,
n_dcs=None,
knn=None,
recompute_pca=False,
recompute_distances=False,
recompute_graph=False,
n_jobs=None):
graph = DataGraph(adata,
k=n_neighbors,
n_pcs=n_pcs,
n_dcs=n_dcs,
recompute_pca=recompute_pca,
recompute_distances=recompute_distances,
recompute_graph=recompute_graph,
n_jobs=n_jobs)
if graph.fresh_compute:
graph.update_diffmap()
adata.add['distance'] = graph.Dsq
adata.add['Ktilde'] = graph.Ktilde
adata.smp['X_diffmap'] = graph.rbasis[:, 1:]
adata.smp['X_diffmap0'] = graph.rbasis[:, 0]
adata.add['diffmap_evals'] = graph.evals[1:]
return graph
def no_recompute_of_graph_necessary(
adata,
recompute_pca=False,
recompute_distances=False,
recompute_graph=False,
n_neighbors=None,
knn=None,
n_dcs=None):
return (not recompute_pca
and not recompute_distances
and not recompute_graph
# make sure X_diffmap is there
and 'X_diffmap' in adata.smp
# make sure enough DCs are there
and (adata.smp['X_diffmap'].shape[1] >= n_dcs-1
if n_dcs is not None else True)
# make sure that it's sparse
and (issparse(adata.add['Ktilde']) == knn
if knn is not None else True)
# make sure n_neighbors matches
and n_neighbors == adata.add['distance'][0].nonzero()[0].size + 1)
def get_neighbors(X, Y, k):
Dsq = utils.comp_sqeuclidean_distance_using_matrix_mult(X, Y)
chunk_range = np.arange(Dsq.shape[0])[:, None]
indices_chunk = np.argpartition(Dsq, k-1, axis=1)[:, :k]
indices_chunk = indices_chunk[chunk_range,
np.argsort(Dsq[chunk_range, indices_chunk])]
indices_chunk = indices_chunk[:, 1:] # exclude first data point (point itself)
distances_chunk = Dsq[chunk_range, indices_chunk]
return indices_chunk, distances_chunk
def get_distance_matrix_and_neighbors(X, k, sparse=True, n_jobs=1):
"""Compute distance matrix in squared Euclidian norm.
"""
if not sparse:
if False: Dsq = utils.comp_distance(X, metric='sqeuclidean')
else: Dsq = utils.comp_sqeuclidean_distance_using_matrix_mult(X, X)
sample_range = np.arange(Dsq.shape[0])[:, None]
indices = np.argpartition(Dsq, k-1, axis=1)[:, :k]
indices = indices[sample_range, np.argsort(Dsq[sample_range, indices])]
indices = indices[:, 1:] # exclude first data point (point itself)
distances = Dsq[sample_range, indices]
elif X.shape[0] > 1e5:
# sklearn is slower, but for large sample numbers more stable
from sklearn.neighbors import NearestNeighbors
sklearn_neighbors = NearestNeighbors(n_neighbors=k-1, n_jobs=n_jobs)
sklearn_neighbors.fit(X)
distances, indices = sklearn_neighbors.kneighbors()
distances = distances.astype('float32')**2
else:
# assume we can fit at max 20000 data points into memory
len_chunk = np.ceil(min(20000, X.shape[0]) / n_jobs).astype(int)
n_chunks = np.ceil(X.shape[0] / len_chunk).astype(int)
chunks = [np.arange(start, min(start + len_chunk, X.shape[0]))
for start in range(0, n_chunks * len_chunk, len_chunk)]
indices = np.zeros((X.shape[0], k-1), dtype=int)
distances = np.zeros((X.shape[0], k-1), dtype=np.float32)
if n_jobs > 1:
# set backend threading, said to be meaningful for computations
# with compiled code. more important: avoids hangs
# when using Parallel below, threading is much slower than
# multiprocessing
result_lst = Parallel(n_jobs=n_jobs, backend='threading')(
delayed(get_neighbors)(X[chunk], X, k) for chunk in chunks)
else:
logg.info('--> can be sped up by setting `n_jobs` > 1')
for i_chunk, chunk in enumerate(chunks):
if n_jobs > 1:
indices_chunk, distances_chunk = result_lst[i_chunk]
else:
indices_chunk, distances_chunk = get_neighbors(X[chunk], X, k)
indices[chunk] = indices_chunk
distances[chunk] = distances_chunk
if sparse:
Dsq = get_sparse_distance_matrix(indices, distances, X.shape[0], k)
return Dsq, indices, distances
def get_sparse_distance_matrix(indices, distances, n_samples, k):
n_neighbors = k - 1
n_nonzero = n_samples * n_neighbors
indptr = np.arange(0, n_nonzero + 1, n_neighbors)
Dsq = sp.sparse.csr_matrix((distances.ravel(),
indices.ravel(),
indptr),
shape=(n_samples, n_samples))
return Dsq
def get_indices_distances_from_sparse_matrix(Dsq, k):
indices = np.zeros((Dsq.shape[0], k), dtype=int)
distances = np.zeros((Dsq.shape[0], k), dtype=Dsq.dtype)
for i in range(indices.shape[0]):
neighbors = Dsq[i].nonzero()
indices[i] = neighbors[1]
distances[i] = Dsq[neighbors]
return indices, distances
class OnFlySymMatrix():
"""Emulate a matrix where elements are calculated on the fly.
"""
def __init__(self, get_row, shape, DC_start=0, DC_end=-1, rows=None, restrict_array=None):
self.get_row = get_row
self.shape = shape
self.DC_start = DC_start
self.DC_end = DC_end
self.rows = {} if rows is None else rows
self.restrict_array = restrict_array # restrict the array to a subset
def __getitem__(self, index):
if isinstance(index, int) or isinstance(index, np.integer):
if self.restrict_array is None:
glob_index = index
else:
# map the index back to the global index
glob_index = self.restrict_array[index]
if glob_index not in self.rows:
self.rows[glob_index] = self.get_row(glob_index)
row = self.rows[glob_index]
if self.restrict_array is None:
return row
else:
return row[self.restrict_array]
else:
if self.restrict_array is None:
glob_index_0, glob_index_1 = index
else:
glob_index_0 = self.restrict_array[index[0]]
glob_index_1 = self.restrict_array[index[1]]
if glob_index_0 not in self.rows:
self.rows[glob_index_0] = self.get_row(glob_index_0)
return self.rows[glob_index_0][glob_index_1]
def restrict(self, index_array):
"""Generate a view restricted to a subset of indices.
"""
new_shape = index_array.shape[0], index_array.shape[0]
return OnFlySymMatrix(self.get_row, new_shape, DC_start=self.DC_start,
DC_end=self.DC_end,
rows=self.rows, restrict_array=index_array)
class DataGraph():
"""Data represented as a graph.
Represent a data matrix as a weighted graph of nearest neighbor relations
(edges) among data points (nodes).
"""
def __init__(self,
adata,
k=None,
knn=True,
n_jobs=None,
n_pcs=50,
n_dcs=15,
recompute_pca=False,
recompute_distances=False,
recompute_graph=False,
flavor='haghverdi16'):
self.sym = True # we do not allow asymetric cases
self.flavor = flavor # this is to experiment around
self.n_pcs = n_pcs
self.n_dcs = n_dcs
self.init_iroot_and_X(adata, recompute_pca, n_pcs)
# use the graph in adata
if no_recompute_of_graph_necessary(
adata,
recompute_pca=recompute_pca,
recompute_distances=recompute_distances,
recompute_graph=recompute_graph,
n_neighbors=k,
knn=knn,
n_dcs=n_dcs):
self.fresh_compute = False
self.knn = issparse(adata.add['Ktilde'])
self.Ktilde = adata.add['Ktilde']
self.Dsq = adata.add['distance']
if self.knn:
self.k = adata.add['distance'][0].nonzero()[0].size + 1
else:
self.k = None # currently do not store this, is unknown
# for output of spectrum
self.X_diffmap = adata.smp['X_diffmap'][:, :n_dcs-1]
self.evals = np.r_[1, adata.add['diffmap_evals'][:n_dcs-1]]
self.rbasis = np.c_[adata.smp['X_diffmap0'][:, None],
adata.smp['X_diffmap'][:, :n_dcs-1]]
self.lbasis = self.rbasis
self.Dchosen = OnFlySymMatrix(self.get_Ddiff_row,
shape=(self.X.shape[0], self.X.shape[0]))
np.set_printoptions(precision=10)
logg.info(' using stored data graph with n_neighbors = {} and '
'spectrum\n {}'
.format(self.k,
str(self.evals).replace('\n', '\n ')))
# recompute the graph
else:
self.fresh_compute = True
self.k = k if k is not None else 30
logg.info(' computing data graph with n_neighbors = {} '
.format(self.k))
self.evals = None
self.rbasis = None
self.lbasis = None
self.X_diffmap = None
self.Dsq = None
self.knn = knn
self.n_jobs = sett.n_jobs if n_jobs is None else n_jobs
self.Dchosen = None
self.init_iroot_and_X(adata, recompute_pca, n_pcs)
if False: # TODO
# in case we already computed distance relations
if not recompute_distances and 'distance' in adata.add:
n_neighbors = adata.add['distance'][0].nonzero()[0].size + 1
if (knn and issparse(adata.add['distance'])
and n_neighbors == self.k):
logg.info(' using stored distances with `n_neighbors={}`'
.format(self.k))
self.Dsq = adata.add['distance']
def init_iroot_directly(self, adata):
self.iroot = None
if 'iroot' in adata.add:
if adata.add['iroot'] >= adata.n_smps:
logg.warn('Root cell index {} does not exist for {} samples. '
'Is ignored.'
.format(adata.add['iroot'], adata.n_smps))
else:
self.iroot = adata.add['iroot']
def init_iroot_and_X(self, adata, recompute_pca, n_pcs):
self.X = adata.X # might be overwritten with X_pca in the next line
# retrieve xroot
xroot = None
if 'xroot' in adata.add: xroot = adata.add['xroot']
elif 'xroot' in adata.var: xroot = adata.var['xroot']
# set iroot directly
self.init_iroot_directly(adata)
# see whether we can set self.iroot using the full data matrix
if xroot is not None and xroot.size == self.X.shape[1]:
self.set_root(xroot)
# use the fulll data matrix X, nothing to be done
if self.n_pcs == 0 or self.X.shape[1] <= self.n_pcs:
logg.info(' using data matrix X directly for building graph (no PCA)')
# use X_pca
else:
# use a precomputed X_pca
if (not recompute_pca
and 'X_pca' in adata.smp
and adata.smp['X_pca'].shape[1] >= self.n_pcs):
logg.info(' using "X_pca" for building graph')
# compute X_pca
else:
logg.info(' compute "X_pca" for building graph')
from ..preprocessing import pca
pca(adata, n_comps=self.n_pcs)
# set the data matrix
self.X = adata.smp['X_pca'][:, :n_pcs]
# see whether we can find xroot using X_pca
if xroot is not None and xroot.size == adata.smp['X_pca'].shape[1]:
self.set_root(xroot[:n_pcs])
def update_diffmap(self, n_comps=None):
"""Diffusion Map as of Coifman et al. (2005) and Haghverdi et al. (2016).
"""
if n_comps is not None:
self.n_dcs = n_comps
logg.info(' updating number of DCs to', self.n_dcs)
if self.evals is None or self.evals.size < self.n_dcs:
logg.info(' computing spectral decomposition ("diffmap") with',
self.n_dcs, 'components', r=True)
self.compute_transition_matrix()
self.embed(n_evals=self.n_dcs)
return True
return False
def compute_Ddiff_all(self, n_evals=10):
raise RuntimeError('deprecated function')
self.embed(n_evals=n_evals)
self.compute_M_matrix()
self.compute_Ddiff_matrix()
def compute_C_all(self, n_evals=10):
self.compute_L_matrix()
self.embed(self.L, n_evals=n_evals, sort='increase')
evalsL = self.evals
self.compute_Lp_matrix()
self.compute_C_matrix()
def spec_layout(self):
self.compute_transition_matrix()
self.compute_L_matrix()
self.embed(self.L, sort='increase')
# write results to dictionary
ddmap = {}
# skip the first eigenvalue/eigenvector
ddmap['Y'] = self.rbasis[:, 1:]
ddmap['evals'] = self.evals[1:]
return ddmap
def compute_distance_matrix(self):
logg.m('computing distance matrix with n_neighbors = {}'
.format(self.k), v=4)
Dsq, indices, distances_sq = get_distance_matrix_and_neighbors(
X=self.X,
k=self.k,
sparse=self.knn,
n_jobs=self.n_jobs)
self.Dsq = Dsq
return Dsq, indices, distances_sq
def compute_transition_matrix(self, alpha=1, recompute_distance=False):
"""Compute transition matrix.
Parameters
----------
alpha : float
The density rescaling parameter of Coifman and Lafon (2006). Should
in all practical applications equal 1: Then only the geometry of the
data matters, not the sampled density.
neglect_selfloops : bool
Discard selfloops.
References
----------
Haghverdi et al. (2016), Coifman and Lafon (2006), Coifman et al. (2005).
"""
if self.Dsq is None or recompute_distance:
Dsq, indices, distances_sq = self.compute_distance_matrix()
else:
Dsq = self.Dsq
indices, distances_sq = get_indices_distances_from_sparse_matrix(Dsq, self.k)
# choose sigma, the heuristic here often makes not much
# of a difference, but is used to reproduce the figures
# of Haghverdi et al. (2016)
if self.knn:
# as the distances are not sorted except for last element
# take median
sigmas_sq = np.median(distances_sq, axis=1)
else:
# the last item is already in its sorted position as
# argpartition puts the (k-1)th element - starting to count from
# zero - in its sorted position
sigmas_sq = distances_sq[:, -1]/4
sigmas = np.sqrt(sigmas_sq)
logg.m('determined n_neighbors =',
self.k, 'nearest neighbors of each point', t=True, v=4)
if self.flavor == 'unweighted':
if not self.knn:
raise ValueError('`flavor="unweighted"` only with `knn=True`.')
self.Ktilde = self.Dsq.sign()
return
# compute the symmetric weight matrix
if not sp.sparse.issparse(self.Dsq):
Num = 2 * np.multiply.outer(sigmas, sigmas)
Den = np.add.outer(sigmas_sq, sigmas_sq)
W = np.sqrt(Num/Den) * np.exp(-Dsq/Den)
# make the weight matrix sparse
if not self.knn:
self.Mask = W > 1e-14
W[self.Mask == False] = 0
else:
# restrict number of neighbors to ~k
# build a symmetric mask
Mask = np.zeros(Dsq.shape, dtype=bool)
for i, row in enumerate(indices):
Mask[i, row] = True
for j in row:
if i not in set(indices[j]):
W[j, i] = W[i, j]
Mask[j, i] = True
# set all entries that are not nearest neighbors to zero
W[Mask == False] = 0
self.Mask = Mask
else:
W = Dsq
for i in range(len(Dsq.indptr[:-1])):
row = Dsq.indices[Dsq.indptr[i]:Dsq.indptr[i+1]]
num = 2 * sigmas[i] * sigmas[row]
den = sigmas_sq[i] + sigmas_sq[row]
W.data[Dsq.indptr[i]:Dsq.indptr[i+1]] = np.sqrt(num/den) * np.exp(-Dsq.data[Dsq.indptr[i]: Dsq.indptr[i+1]] / den)
W = W.tolil()
for i, row in enumerate(indices):
for j in row:
if i not in set(indices[j]):
W[j, i] = W[i, j]
if False:
W.setdiag(1) # set diagonal to one
logg.m(' note that now, we set the diagonal of the weight matrix to one!')
W = W.tocsr()
logg.m('computed W (weight matrix) with "knn" =', self.knn, t=True, v=4)
if False:
pl.matshow(W)
pl.title('$ W$')
pl.colorbar()
# density normalization
# as discussed in Coifman et al. (2005)
# ensure that kernel matrix is independent of sampling density
if alpha == 0:
# nothing happens here, simply use the isotropic similarity matrix
self.K = W
else:
# q[i] is an estimate for the sampling density at point x_i
# it's also the degree of the underlying graph
if not sp.sparse.issparse(W):
q = np.sum(W, axis=0)
# raise to power alpha
if alpha != 1:
q = q**alpha
Den = np.outer(q, q)
self.K = W / Den
else:
q = np.array(np.sum(W, axis=0)).flatten()
self.K = W
for i in range(len(W.indptr[:-1])):
row = W.indices[W.indptr[i]: W.indptr[i+1]]
num = q[i] * q[row]
W.data[W.indptr[i]: W.indptr[i+1]] = W.data[W.indptr[i]: W.indptr[i+1]] / num
logg.m('computed K (anisotropic kernel)', t=True, v=4)
if not sp.sparse.issparse(self.K):
# now compute the row normalization to build the transition matrix T
# and the adjoint Ktilde: both have the same spectrum
self.z = np.sum(self.K, axis=0)
# the following is the transition matrix
self.T = self.K / self.z[:, np.newaxis]
# now we need the square root of the density
self.sqrtz = np.array(np.sqrt(self.z))
# now compute the density-normalized Kernel
# it's still symmetric
szszT = np.outer(self.sqrtz, self.sqrtz)
self.Ktilde = self.K / szszT
else:
self.z = np.array(np.sum(self.K, axis=0)).flatten()
# now we need the square root of the density
self.sqrtz = np.array(np.sqrt(self.z))
# now compute the density-normalized Kernel
# it's still symmetric
self.Ktilde = self.K
for i in range(len(self.K.indptr[:-1])):
row = self.K.indices[self.K.indptr[i]: self.K.indptr[i+1]]
num = self.sqrtz[i] * self.sqrtz[row]
self.Ktilde.data[self.K.indptr[i]: self.K.indptr[i+1]] = self.K.data[self.K.indptr[i]: self.K.indptr[i+1]] / num
logg.m('computed Ktilde (normalized anistropic kernel)', v=4)
def compute_L_matrix(self):
"""Graph Laplacian for K.
"""
self.L = np.diag(self.z) - self.K
logg.info('compute graph Laplacian')
def embed(self, matrix=None, n_evals=15, sym=None, sort='decrease'):
"""Compute eigen decomposition of matrix.
Parameters
----------
matrix : np.ndarray
Matrix to diagonalize.
n_evals : int
Number of eigenvalues/vectors to be computed, set n_evals = 0 if
you need all eigenvectors.
sym : bool
Instead of computing the eigendecomposition of the assymetric
transition matrix, computed the eigendecomposition of the symmetric
Ktilde matrix.
Writes attributes
-----------------
evals : np.ndarray
Eigenvalues of transition matrix
lbasis : np.ndarray
Matrix of left eigenvectors (stored in columns).
rbasis : np.ndarray
Matrix of right eigenvectors (stored in columns).
self.rbasis is projection of data matrix on right eigenvectors,
that is, the projection on the diffusion components.
these are simply the components of the right eigenvectors
and can directly be used for plotting.
"""
np.set_printoptions(precision=10)
if sym is None: sym = self.sym
self.rbasisBool = True
if matrix is None: matrix = self.Ktilde
# compute the spectrum
if n_evals == 0:
evals, evecs = sp.linalg.eigh(matrix)
else:
n_evals = min(matrix.shape[0]-1, n_evals)
# ncv = max(2 * n_evals + 1, int(np.sqrt(matrix.shape[0])))
ncv = None
which = 'LM' if sort == 'decrease' else 'SM'
# it pays off to increase the stability with a bit more precision
matrix = matrix.astype(np.float64)
evals, evecs = sp.sparse.linalg.eigsh(matrix, k=n_evals,
which=which, ncv=ncv)
evals, evecs = evals.astype(np.float32), evecs.astype(np.float32)
if sort == 'decrease':
evals = evals[::-1]
evecs = evecs[:, ::-1]
if logg.verbosity_greater_or_equal_than(4):
logg.m('computed eigenvalues', t=True, v=4)
else:
logg.info(' eigenvalues of transition matrix')
logg.info(' ', str(evals).replace('\n', '\n '))
# assign attributes
self.evals = evals
count_ones = sum([1 for v in self.evals if v == 1])
if count_ones > len(self.evals)/2:
logg.warn('Transition matrix has many irreducible blocks!')
if sym:
self.rbasis = self.lbasis = evecs
else:
# The eigenvectors of T are stored in self.rbasis and self.lbasis
# and are simple trafos of the eigenvectors of Ktilde.
# rbasis and lbasis are right and left eigenvectors, respectively
self.rbasis = np.array(evecs / self.sqrtz[:, np.newaxis])
self.lbasis = np.array(evecs * self.sqrtz[:, np.newaxis])
# normalize in L2 norm
# note that, in contrast to that, a probability distribution
# on the graph is normalized in L1 norm
# therefore, the eigenbasis in this normalization does not correspond
# to a probability distribution on the graph
if False:
self.rbasis /= np.linalg.norm(self.rbasis, axis=0, ord=2)
self.lbasis /= np.linalg.norm(self.lbasis, axis=0, ord=2)
# init on-the-fly computed distance "matrix"
self.Dchosen = OnFlySymMatrix(self.get_Ddiff_row,
shape=self.Dsq.shape)
def _get_M_row_chunk(self, i_range):
M_chunk = np.zeros((len(i_range), self.X.shape[0]), dtype=np.float32)
for i_cnt, i in enumerate(i_range):
if False: # not much slower, but slower
M_chunk[i_cnt] = self.get_M_row(j)
else:
M_chunk[i_cnt] = utils_cy.get_M_row(i, self.evals, self.rbasis, self.lbasis)
return M_chunk
def compute_M_matrix(self):
"""The M matrix is the matrix that results from summing over all powers of
T in the subspace without the first eigenspace.
See Haghverdi et al. (2016).
"""
if self.n_jobs >= 4: # if we have enough cores, skip this step
return # TODO: make sure that this is really the best strategy
logg.m(' try computing "M" matrix using up to 90% of `sett.max_memory`')
if True: # Python version
self.M = sum([self.evals[l]/(1-self.evals[l])
* np.outer(self.rbasis[:, l], self.lbasis[:, l])
for l in range(1, self.evals.size)])
self.M += np.outer(self.rbasis[:, 0], self.lbasis[:, 0])
else: # Cython version
used_memory, _ = logg.get_memory_usage()
memory_for_M = self.X.shape[0]**2 * 23 / 8 / 1e9 # in GB
logg.m(' max memory =', sett.max_memory,
' / used memory = {:.1f}'.format(used_memory),
' / memory_for_M = {:.1f}'.format(memory_for_M))
if used_memory + memory_for_M < 0.9 * sett.max_memory:
logg.m(0, ' allocate memory and compute M matrix')
len_chunk = np.ceil(self.X.shape[0] / self.n_jobs).astype(int)
n_chunks = np.ceil(self.X.shape[0] / len_chunk).astype(int)
chunks = [np.arange(start, min(start + len_chunk, self.X.shape[0]))
for start in range(0, n_chunks * len_chunk, len_chunk)]
# parallel computing does not seem to help
if False: # self.n_jobs > 1:
# here backend threading is not necessary, and seems to slow
# down everything considerably
result_lst = Parallel(n_jobs=self.n_jobs, backend='threading')(
delayed(self._get_M_row_chunk)(chunk)
for chunk in chunks)
self.M = np.zeros((self.X.shape[0], self.X.shape[0]),
dtype=np.float32)
for i_chunk, chunk in enumerate(chunks):
if False: # self.n_jobs > 1:
M_chunk = result_lst[i_chunk]
else:
M_chunk = self._get_M_row_chunk(chunk)
self.M[chunk] = M_chunk
# the following did not work
# filename = sett.writedir + 'tmp.npy'
# np.save(filename, self.M)
# self.M = filename
sett.mt(0, 'finished computation of M')
else:
logg.m('not enough memory to compute M, using "on-the-fly" computation')
def compute_Ddiff_matrix(self):
"""Returns the distance matrix in the Diffusion Pseudotime metric.
See Haghverdi et al. (2016).
Notes
-----
- Is based on M matrix.
- self.Ddiff[self.iroot,:] stores diffusion pseudotime as a vector.
"""
if self.M.shape[0] > 1000:
logg.m('--> high number of dimensions for computing DPT distance matrix\n'
' computing PCA with 50 components')
from ..preprocessing import pca
self.M = pca(self.M, n_comps=50, mute=True)
self.Ddiff = sp.spatial.distance.squareform(sp.spatial.distance.pdist(self.M))
logg.m('computed Ddiff distance matrix', t=True)
self.Dchosen = self.Ddiff
def _get_Ddiff_row_chunk(self, m_i, j_range):
M = self.M # caching with a file on disk did not work
d_i = np.zeros(len(j_range))
for j_cnt, j in enumerate(j_range):
if False: # not much slower, but slower
m_j = self.get_M_row(j)
d_i[j_cnt] = sp.spatial.distance.cdist(m_i[None, :], m_j[None, :])
else:
if M is None:
m_j = utils_cy.get_M_row(j, self.evals, self.rbasis, self.lbasis)
else:
m_j = M[j]
d_i[j_cnt] = utils_cy.c_dist(m_i, m_j)
return d_i
def get_Ddiff_row(self, i):
if not self.sym:
raise ValueError('Not bug-free implemented! '
'Computation needs to be adjusted if sym=False.')
row = sum([(self.evals[l]/(1-self.evals[l])
* (self.rbasis[i, l] - self.lbasis[:, l]))**2
for l in range(0, self.evals.size) if self.evals[l] < 1])
row += sum([(self.rbasis[i, l] - self.lbasis[:, l])**2
for l in range(0, self.evals.size) if self.evals[l] == 1.0])
return np.sqrt(row)
def get_Ddiff_row_deprecated(self, i):
if self.M is None:
m_i = utils_cy.get_M_row(i, self.evals, self.rbasis, self.lbasis)
else:
m_i = self.M[i]
len_chunk = np.ceil(self.X.shape[0] / self.n_jobs).astype(int)
n_chunks = np.ceil(self.X.shape[0] / len_chunk).astype(int)
chunks = [np.arange(start, min(start + len_chunk, self.X.shape[0]))
for start in range(0, n_chunks * len_chunk, len_chunk)]
if self.n_jobs >= 4: # problems with high memory calculations, we skip computing M above
# here backend threading is not necessary, and seems to slow
# down everything considerably
result_lst = Parallel(n_jobs=self.n_jobs)(
delayed(self._get_Ddiff_row_chunk)(m_i, chunk)
for chunk in chunks)
d_i = np.zeros(self.X.shape[0])
for i_chunk, chunk in enumerate(chunks):
if self.n_jobs >= 4: d_i_chunk = result_lst[i_chunk]
else: d_i_chunk = self._get_Ddiff_row_chunk(m_i, chunk)
d_i[chunk] = d_i_chunk
return d_i
def compute_Lp_matrix(self):
"""See Fouss et al. (2006) and von Luxburg et al. (2007).
See Proposition 6 in von Luxburg (2007) and the inline equations
right in the text above.
"""
self.Lp = sum([1/self.evals[i]
* np.outer(self.rbasis[:, i], self.lbasis[:, i])
for i in range(1, self.evals.size)])
sett.mt(0, 'computed pseudoinverse of Laplacian')
def compute_C_matrix(self):
"""See Fouss et al. (2006) and von Luxburg et al. (2007).
This is the commute-time matrix. It's a squared-euclidian distance
matrix in \mathbb{R}^n.
"""
self.C = np.repeat(np.diag(self.Lp)[:, np.newaxis],
self.Lp.shape[0], axis=1)
self.C += np.repeat(np.diag(self.Lp)[np.newaxis, :],
self.Lp.shape[0], axis=0)
self.C -= 2*self.Lp
# the following is much slower
# self.C = np.zeros(self.Lp.shape)
# for i in range(self.Lp.shape[0]):
# for j in range(self.Lp.shape[1]):
# self.C[i, j] = self.Lp[i, i] + self.Lp[j, j] - 2*self.Lp[i, j]
volG = np.sum(self.z)
self.C *= volG
sett.mt(0, 'computed commute distance matrix')
self.Dchosen = self.C
def compute_MFP_matrix(self):
"""See Fouss et al. (2006).
This is the mean-first passage time matrix. It's not a distance.
Mfp[i, k] := m(k|i) in the notation of Fouss et al. (2006). This
corresponds to the standard notation for transition matrices (left index
initial state, right index final state, i.e. a right-stochastic
matrix, with each row summing to one).
"""
self.MFP = np.zeros(self.Lp.shape)
for i in range(self.Lp.shape[0]):
for k in range(self.Lp.shape[1]):
for j in range(self.Lp.shape[1]):
self.MFP[i, k] += (self.Lp[i, j] - self.Lp[i, k]
- self.Lp[k, j] + self.Lp[k, k]) * self.z[j]
sett.mt(0, 'computed mean first passage time matrix')
self.Dchosen = self.MFP
def set_pseudotime(self):
"""Return pseudotime with respect to root point.
"""
self.pseudotime = self.Dchosen[self.iroot].copy()
self.pseudotime /= np.max(self.pseudotime)
def set_root(self, xroot):
"""Determine the index of the root cell.
Given an expression vector, find the observation index that is closest
to this vector.
Parameters
----------
xroot : np.ndarray
Vector that marks the root cell, the vector storing the initial
condition, only relevant for computing pseudotime.
"""
if self.X.shape[1] != xroot.size:
raise ValueError('The root vector you provided does not have the '
'correct dimension. Make sure you provide the dimension-'
'reduced version, if you provided X_pca.')
# this is the squared distance
dsqroot = 1e10
iroot = 0
for i in range(self.X.shape[0]):
diff = self.X[i, :] - xroot
dsq = diff.dot(diff)
if dsq < dsqroot:
dsqroot = dsq
iroot = i
if np.sqrt(dsqroot) < 1e-10: break
logg.m('setting root index to', iroot, v=4)
if self.iroot is not None and iroot != self.iroot:
logg.warn('Changing index of iroot from {} to {}.'.format(self.iroot, iroot))
self.iroot = iroot
return self.iroot
def _test_embed(self):
"""
Checks and tests for embed.
"""
# pl.semilogy(w,'x',label=r'$ \widetilde K$')
# pl.show()
if sett.verbosity > 2:
# output of spectrum of K for comparison
w, v = np.linalg.eigh(self.K)
sett.mi('spectrum of K (kernel)')
if sett.verbosity > 3:
# direct computation of spectrum of T
w, vl, vr = sp.linalg.eig(self.T, left=True)
sett.mi('spectrum of transition matrix (should be same as of Ktilde)')
ensured consistency of last commit
# Author: F. Alex Wolf (http://falexwolf.de)
"""Data Graph
Represent a data matrix as a weighted graph of nearest neighbor relations
(edges) among data points (nodes).
"""
import numpy as np
import scipy as sp
import scipy.spatial
import scipy.sparse
from scipy.sparse import issparse
from joblib import Parallel, delayed
from ..cython import utils_cy
from .. import settings as sett
from .. import logging as logg
from .. import utils
def add_or_update_graph_in_adata(
adata,
n_neighbors=30,
n_pcs=50,
n_dcs=15,
knn=None,
recompute_pca=False,
recompute_distances=False,
recompute_graph=False,
n_jobs=None):
graph = DataGraph(adata,
k=n_neighbors,
n_pcs=n_pcs,
n_dcs=n_dcs,
recompute_pca=recompute_pca,
recompute_distances=recompute_distances,
recompute_graph=recompute_graph,
n_jobs=n_jobs)
if graph.fresh_compute:
graph.update_diffmap()
adata.add['distance'] = graph.Dsq
adata.add['Ktilde'] = graph.Ktilde
adata.smp['X_diffmap'] = graph.rbasis[:, 1:]
adata.smp['X_diffmap0'] = graph.rbasis[:, 0]
adata.add['diffmap_evals'] = graph.evals[1:]
return graph
def no_recompute_of_graph_necessary(
adata,
recompute_pca=False,
recompute_distances=False,
recompute_graph=False,
n_neighbors=None,
knn=None,
n_dcs=None):
return (not recompute_pca
and not recompute_distances
and not recompute_graph
# make sure X_diffmap is there
and 'X_diffmap' in adata.smp
# make sure enough DCs are there
and (adata.smp['X_diffmap'].shape[1] >= n_dcs-1
if n_dcs is not None else True)
# make sure that it's sparse
and (issparse(adata.add['Ktilde']) == knn
if knn is not None else True)
# make sure n_neighbors matches
and n_neighbors == adata.add['distance'][0].nonzero()[0].size + 1)
def get_neighbors(X, Y, k):
Dsq = utils.comp_sqeuclidean_distance_using_matrix_mult(X, Y)
chunk_range = np.arange(Dsq.shape[0])[:, None]
indices_chunk = np.argpartition(Dsq, k-1, axis=1)[:, :k]
indices_chunk = indices_chunk[chunk_range,
np.argsort(Dsq[chunk_range, indices_chunk])]
indices_chunk = indices_chunk[:, 1:] # exclude first data point (point itself)
distances_chunk = Dsq[chunk_range, indices_chunk]
return indices_chunk, distances_chunk
def get_distance_matrix_and_neighbors(X, k, sparse=True, n_jobs=1):
"""Compute distance matrix in squared Euclidian norm.
"""
if not sparse:
if False: Dsq = utils.comp_distance(X, metric='sqeuclidean')
else: Dsq = utils.comp_sqeuclidean_distance_using_matrix_mult(X, X)
sample_range = np.arange(Dsq.shape[0])[:, None]
indices = np.argpartition(Dsq, k-1, axis=1)[:, :k]
indices = indices[sample_range, np.argsort(Dsq[sample_range, indices])]
indices = indices[:, 1:] # exclude first data point (point itself)
distances = Dsq[sample_range, indices]
elif X.shape[0] > 1e5:
# sklearn is slower, but for large sample numbers more stable
from sklearn.neighbors import NearestNeighbors
sklearn_neighbors = NearestNeighbors(n_neighbors=k-1, n_jobs=n_jobs)
sklearn_neighbors.fit(X)
distances, indices = sklearn_neighbors.kneighbors()
distances = distances.astype('float32')**2
else:
# assume we can fit at max 20000 data points into memory
len_chunk = np.ceil(min(20000, X.shape[0]) / n_jobs).astype(int)
n_chunks = np.ceil(X.shape[0] / len_chunk).astype(int)
chunks = [np.arange(start, min(start + len_chunk, X.shape[0]))
for start in range(0, n_chunks * len_chunk, len_chunk)]
indices = np.zeros((X.shape[0], k-1), dtype=int)
distances = np.zeros((X.shape[0], k-1), dtype=np.float32)
if n_jobs > 1:
# set backend threading, said to be meaningful for computations
# with compiled code. more important: avoids hangs
# when using Parallel below, threading is much slower than
# multiprocessing
result_lst = Parallel(n_jobs=n_jobs, backend='threading')(
delayed(get_neighbors)(X[chunk], X, k) for chunk in chunks)
else:
logg.info('--> can be sped up by setting `n_jobs` > 1')
for i_chunk, chunk in enumerate(chunks):
if n_jobs > 1:
indices_chunk, distances_chunk = result_lst[i_chunk]
else:
indices_chunk, distances_chunk = get_neighbors(X[chunk], X, k)
indices[chunk] = indices_chunk
distances[chunk] = distances_chunk
if sparse:
Dsq = get_sparse_distance_matrix(indices, distances, X.shape[0], k)
return Dsq, indices, distances
def get_sparse_distance_matrix(indices, distances, n_samples, k):
n_neighbors = k - 1
n_nonzero = n_samples * n_neighbors
indptr = np.arange(0, n_nonzero + 1, n_neighbors)
Dsq = sp.sparse.csr_matrix((distances.ravel(),
indices.ravel(),
indptr),
shape=(n_samples, n_samples))
return Dsq
def get_indices_distances_from_sparse_matrix(Dsq, k):
indices = np.zeros((Dsq.shape[0], k), dtype=int)
distances = np.zeros((Dsq.shape[0], k), dtype=Dsq.dtype)
for i in range(indices.shape[0]):
neighbors = Dsq[i].nonzero()
indices[i] = neighbors[1]
distances[i] = Dsq[neighbors]
return indices, distances
class OnFlySymMatrix():
"""Emulate a matrix where elements are calculated on the fly.
"""
def __init__(self, get_row, shape, DC_start=0, DC_end=-1, rows=None, restrict_array=None):
self.get_row = get_row
self.shape = shape
self.DC_start = DC_start
self.DC_end = DC_end
self.rows = {} if rows is None else rows
self.restrict_array = restrict_array # restrict the array to a subset
def __getitem__(self, index):
if isinstance(index, int) or isinstance(index, np.integer):
if self.restrict_array is None:
glob_index = index
else:
# map the index back to the global index
glob_index = self.restrict_array[index]
if glob_index not in self.rows:
self.rows[glob_index] = self.get_row(glob_index)
row = self.rows[glob_index]
if self.restrict_array is None:
return row
else:
return row[self.restrict_array]
else:
if self.restrict_array is None:
glob_index_0, glob_index_1 = index
else:
glob_index_0 = self.restrict_array[index[0]]
glob_index_1 = self.restrict_array[index[1]]
if glob_index_0 not in self.rows:
self.rows[glob_index_0] = self.get_row(glob_index_0)
return self.rows[glob_index_0][glob_index_1]
def restrict(self, index_array):
"""Generate a view restricted to a subset of indices.
"""
new_shape = index_array.shape[0], index_array.shape[0]
return OnFlySymMatrix(self.get_row, new_shape, DC_start=self.DC_start,
DC_end=self.DC_end,
rows=self.rows, restrict_array=index_array)
class DataGraph():
"""Data represented as a graph.
Represent a data matrix as a weighted graph of nearest neighbor relations
(edges) among data points (nodes).
"""
def __init__(self,
adata,
k=None,
knn=True,
n_jobs=None,
n_pcs=50,
n_dcs=15,
recompute_pca=False,
recompute_distances=False,
recompute_graph=False,
flavor='haghverdi16'):
self.sym = True # we do not allow asymetric cases
self.flavor = flavor # this is to experiment around
self.n_pcs = n_pcs
self.n_dcs = n_dcs
self.init_iroot_and_X(adata, recompute_pca, n_pcs)
# use the graph in adata
if no_recompute_of_graph_necessary(
adata,
recompute_pca=recompute_pca,
recompute_distances=recompute_distances,
recompute_graph=recompute_graph,
n_neighbors=k,
knn=knn,
n_dcs=n_dcs):
self.fresh_compute = False
self.knn = issparse(adata.add['Ktilde'])
self.Ktilde = adata.add['Ktilde']
self.Dsq = adata.add['distance']
if self.knn:
self.k = adata.add['distance'][0].nonzero()[0].size + 1
else:
self.k = None # currently do not store this, is unknown
# for output of spectrum
self.X_diffmap = adata.smp['X_diffmap'][:, :n_dcs-1]
self.evals = np.r_[1, adata.add['diffmap_evals'][:n_dcs-1]]
self.rbasis = np.c_[adata.smp['X_diffmap0'][:, None],
adata.smp['X_diffmap'][:, :n_dcs-1]]
self.lbasis = self.rbasis
self.Dchosen = OnFlySymMatrix(self.get_Ddiff_row,
shape=(self.X.shape[0], self.X.shape[0]))
np.set_printoptions(precision=10)
logg.info(' using stored data graph with n_neighbors = {} and '
'spectrum\n {}'
.format(self.k,
str(self.evals).replace('\n', '\n ')))
# recompute the graph
else:
self.fresh_compute = True
self.k = k if k is not None else 30
logg.info(' computing data graph with n_neighbors = {} '
.format(self.k))
self.evals = None
self.rbasis = None
self.lbasis = None
self.X_diffmap = None
self.Dsq = None
self.knn = knn
self.n_jobs = sett.n_jobs if n_jobs is None else n_jobs
self.Dchosen = None
self.init_iroot_and_X(adata, recompute_pca, n_pcs)
if False: # TODO
# in case we already computed distance relations
if not recompute_distances and 'distance' in adata.add:
n_neighbors = adata.add['distance'][0].nonzero()[0].size + 1
if (knn and issparse(adata.add['distance'])
and n_neighbors == self.k):
logg.info(' using stored distances with `n_neighbors={}`'
.format(self.k))
self.Dsq = adata.add['distance']
def init_iroot_directly(self, adata):
self.iroot = None
if 'iroot' in adata.add:
if adata.add['iroot'] >= adata.n_smps:
logg.warn('Root cell index {} does not exist for {} samples. '
'Is ignored.'
.format(adata.add['iroot'], adata.n_smps))
else:
self.iroot = adata.add['iroot']
def init_iroot_and_X(self, adata, recompute_pca, n_pcs):
self.X = adata.X # might be overwritten with X_pca in the next line
# retrieve xroot
xroot = None
if 'xroot' in adata.add: xroot = adata.add['xroot']
elif 'xroot' in adata.var: xroot = adata.var['xroot']
# set iroot directly
self.init_iroot_directly(adata)
# see whether we can set self.iroot using the full data matrix
if xroot is not None and xroot.size == self.X.shape[1]:
self.set_root(xroot)
# use the fulll data matrix X, nothing to be done
if self.n_pcs == 0 or self.X.shape[1] <= self.n_pcs:
logg.info(' using data matrix X directly for building graph (no PCA)')
# use X_pca
else:
# use a precomputed X_pca
if (not recompute_pca
and 'X_pca' in adata.smp
and adata.smp['X_pca'].shape[1] >= self.n_pcs):
logg.info(' using "X_pca" for building graph')
# compute X_pca
else:
logg.info(' compute "X_pca" for building graph')
from ..preprocessing import pca
pca(adata, n_comps=self.n_pcs)
# set the data matrix
self.X = adata.smp['X_pca'][:, :n_pcs]
# see whether we can find xroot using X_pca
if xroot is not None and xroot.size == adata.smp['X_pca'].shape[1]:
self.set_root(xroot[:n_pcs])
def update_diffmap(self, n_comps=None):
"""Diffusion Map as of Coifman et al. (2005) and Haghverdi et al. (2016).
"""
if n_comps is not None:
self.n_dcs = n_comps
logg.info(' updating number of DCs to', self.n_dcs)
if self.evals is None or self.evals.size < self.n_dcs:
logg.info(' computing spectral decomposition ("diffmap") with',
self.n_dcs, 'components', r=True)
self.compute_transition_matrix()
self.embed(n_evals=self.n_dcs)
return True
return False
def compute_Ddiff_all(self, n_evals=10):
raise RuntimeError('deprecated function')
self.embed(n_evals=n_evals)
self.compute_M_matrix()
self.compute_Ddiff_matrix()
def compute_C_all(self, n_evals=10):
self.compute_L_matrix()
self.embed(self.L, n_evals=n_evals, sort='increase')
evalsL = self.evals
self.compute_Lp_matrix()
self.compute_C_matrix()
def spec_layout(self):
self.compute_transition_matrix()
self.compute_L_matrix()
self.embed(self.L, sort='increase')
# write results to dictionary
ddmap = {}
# skip the first eigenvalue/eigenvector
ddmap['Y'] = self.rbasis[:, 1:]
ddmap['evals'] = self.evals[1:]
return ddmap
def compute_distance_matrix(self):
logg.m('computing distance matrix with n_neighbors = {}'
.format(self.k), v=4)
Dsq, indices, distances_sq = get_distance_matrix_and_neighbors(
X=self.X,
k=self.k,
sparse=self.knn,
n_jobs=self.n_jobs)
self.Dsq = Dsq
return Dsq, indices, distances_sq
def compute_transition_matrix(self, alpha=1, recompute_distance=False):
"""Compute transition matrix.
Parameters
----------
alpha : float
The density rescaling parameter of Coifman and Lafon (2006). Should
in all practical applications equal 1: Then only the geometry of the
data matters, not the sampled density.
neglect_selfloops : bool
Discard selfloops.
References
----------
Haghverdi et al. (2016), Coifman and Lafon (2006), Coifman et al. (2005).
"""
if self.Dsq is None or recompute_distance:
Dsq, indices, distances_sq = self.compute_distance_matrix()
else:
Dsq = self.Dsq
indices, distances_sq = get_indices_distances_from_sparse_matrix(Dsq, self.k)
# choose sigma, the heuristic here often makes not much
# of a difference, but is used to reproduce the figures
# of Haghverdi et al. (2016)
if self.knn:
# as the distances are not sorted except for last element
# take median
sigmas_sq = np.median(distances_sq, axis=1)
else:
# the last item is already in its sorted position as
# argpartition puts the (k-1)th element - starting to count from
# zero - in its sorted position
sigmas_sq = distances_sq[:, -1]/4
sigmas = np.sqrt(sigmas_sq)
logg.m('determined n_neighbors =',
self.k, 'nearest neighbors of each point', t=True, v=4)
if self.flavor == 'unweighted':
if not self.knn:
raise ValueError('`flavor="unweighted"` only with `knn=True`.')
self.Ktilde = self.Dsq.sign()
return
# compute the symmetric weight matrix
if not sp.sparse.issparse(self.Dsq):
Num = 2 * np.multiply.outer(sigmas, sigmas)
Den = np.add.outer(sigmas_sq, sigmas_sq)
W = np.sqrt(Num/Den) * np.exp(-Dsq/Den)
# make the weight matrix sparse
if not self.knn:
self.Mask = W > 1e-14
W[self.Mask == False] = 0
else:
# restrict number of neighbors to ~k
# build a symmetric mask
Mask = np.zeros(Dsq.shape, dtype=bool)
for i, row in enumerate(indices):
Mask[i, row] = True
for j in row:
if i not in set(indices[j]):
W[j, i] = W[i, j]
Mask[j, i] = True
# set all entries that are not nearest neighbors to zero
W[Mask == False] = 0
self.Mask = Mask
else:
W = Dsq
for i in range(len(Dsq.indptr[:-1])):
row = Dsq.indices[Dsq.indptr[i]:Dsq.indptr[i+1]]
num = 2 * sigmas[i] * sigmas[row]
den = sigmas_sq[i] + sigmas_sq[row]
W.data[Dsq.indptr[i]:Dsq.indptr[i+1]] = np.sqrt(num/den) * np.exp(-Dsq.data[Dsq.indptr[i]: Dsq.indptr[i+1]] / den)
W = W.tolil()
for i, row in enumerate(indices):
for j in row:
if i not in set(indices[j]):
W[j, i] = W[i, j]
if False:
W.setdiag(1) # set diagonal to one
logg.m(' note that now, we set the diagonal of the weight matrix to one!')
W = W.tocsr()
logg.m('computed W (weight matrix) with "knn" =', self.knn, t=True, v=4)
if False:
pl.matshow(W)
pl.title('$ W$')
pl.colorbar()
# density normalization
# as discussed in Coifman et al. (2005)
# ensure that kernel matrix is independent of sampling density
if alpha == 0:
# nothing happens here, simply use the isotropic similarity matrix
self.K = W
else:
# q[i] is an estimate for the sampling density at point x_i
# it's also the degree of the underlying graph
if not sp.sparse.issparse(W):
q = np.sum(W, axis=0)
# raise to power alpha
if alpha != 1:
q = q**alpha
Den = np.outer(q, q)
self.K = W / Den
else:
q = np.array(np.sum(W, axis=0)).flatten()
self.K = W
for i in range(len(W.indptr[:-1])):
row = W.indices[W.indptr[i]: W.indptr[i+1]]
num = q[i] * q[row]
W.data[W.indptr[i]: W.indptr[i+1]] = W.data[W.indptr[i]: W.indptr[i+1]] / num
logg.m('computed K (anisotropic kernel)', t=True, v=4)
if not sp.sparse.issparse(self.K):
# now compute the row normalization to build the transition matrix T
# and the adjoint Ktilde: both have the same spectrum
self.z = np.sum(self.K, axis=0)
# the following is the transition matrix
self.T = self.K / self.z[:, np.newaxis]
# now we need the square root of the density
self.sqrtz = np.array(np.sqrt(self.z))
# now compute the density-normalized Kernel
# it's still symmetric
szszT = np.outer(self.sqrtz, self.sqrtz)
self.Ktilde = self.K / szszT
else:
self.z = np.array(np.sum(self.K, axis=0)).flatten()
# now we need the square root of the density
self.sqrtz = np.array(np.sqrt(self.z))
# now compute the density-normalized Kernel
# it's still symmetric
self.Ktilde = self.K
for i in range(len(self.K.indptr[:-1])):
row = self.K.indices[self.K.indptr[i]: self.K.indptr[i+1]]
num = self.sqrtz[i] * self.sqrtz[row]
self.Ktilde.data[self.K.indptr[i]: self.K.indptr[i+1]] = self.K.data[self.K.indptr[i]: self.K.indptr[i+1]] / num
logg.m('computed Ktilde (normalized anistropic kernel)', v=4)
def compute_L_matrix(self):
"""Graph Laplacian for K.
"""
self.L = np.diag(self.z) - self.K
logg.info('compute graph Laplacian')
def embed(self, matrix=None, n_evals=15, sym=None, sort='decrease'):
"""Compute eigen decomposition of matrix.
Parameters
----------
matrix : np.ndarray
Matrix to diagonalize.
n_evals : int
Number of eigenvalues/vectors to be computed, set n_evals = 0 if
you need all eigenvectors.
sym : bool
Instead of computing the eigendecomposition of the assymetric
transition matrix, computed the eigendecomposition of the symmetric
Ktilde matrix.
Writes attributes
-----------------
evals : np.ndarray
Eigenvalues of transition matrix
lbasis : np.ndarray
Matrix of left eigenvectors (stored in columns).
rbasis : np.ndarray
Matrix of right eigenvectors (stored in columns).
self.rbasis is projection of data matrix on right eigenvectors,
that is, the projection on the diffusion components.
these are simply the components of the right eigenvectors
and can directly be used for plotting.
"""
np.set_printoptions(precision=10)
if sym is None: sym = self.sym
self.rbasisBool = True
if matrix is None: matrix = self.Ktilde
# compute the spectrum
if n_evals == 0:
evals, evecs = sp.linalg.eigh(matrix)
else:
n_evals = min(matrix.shape[0]-1, n_evals)
# ncv = max(2 * n_evals + 1, int(np.sqrt(matrix.shape[0])))
ncv = None
which = 'LM' if sort == 'decrease' else 'SM'
# it pays off to increase the stability with a bit more precision
matrix = matrix.astype(np.float64)
evals, evecs = sp.sparse.linalg.eigsh(matrix, k=n_evals,
which=which, ncv=ncv)
evals, evecs = evals.astype(np.float32), evecs.astype(np.float32)
if sort == 'decrease':
evals = evals[::-1]
evecs = evecs[:, ::-1]
if logg.verbosity_greater_or_equal_than(4):
logg.m('computed eigenvalues', t=True, v=4)
else:
logg.info(' eigenvalues of transition matrix')
logg.info(' ', str(evals).replace('\n', '\n '))
# assign attributes
self.evals = evals
count_ones = sum([1 for v in self.evals if v == 1])
if count_ones > len(self.evals)/2:
logg.warn('Transition matrix has many irreducible blocks!')
if sym:
self.rbasis = self.lbasis = evecs
else:
# The eigenvectors of T are stored in self.rbasis and self.lbasis
# and are simple trafos of the eigenvectors of Ktilde.
# rbasis and lbasis are right and left eigenvectors, respectively
self.rbasis = np.array(evecs / self.sqrtz[:, np.newaxis])
self.lbasis = np.array(evecs * self.sqrtz[:, np.newaxis])
# normalize in L2 norm
# note that, in contrast to that, a probability distribution
# on the graph is normalized in L1 norm
# therefore, the eigenbasis in this normalization does not correspond
# to a probability distribution on the graph
if False:
self.rbasis /= np.linalg.norm(self.rbasis, axis=0, ord=2)
self.lbasis /= np.linalg.norm(self.lbasis, axis=0, ord=2)
# init on-the-fly computed distance "matrix"
self.Dchosen = OnFlySymMatrix(self.get_Ddiff_row,
shape=self.Dsq.shape)
def _get_M_row_chunk(self, i_range):
M_chunk = np.zeros((len(i_range), self.X.shape[0]), dtype=np.float32)
for i_cnt, i in enumerate(i_range):
if False: # not much slower, but slower
M_chunk[i_cnt] = self.get_M_row(j)
else:
M_chunk[i_cnt] = utils_cy.get_M_row(i, self.evals, self.rbasis, self.lbasis)
return M_chunk
def compute_M_matrix(self):
"""The M matrix is the matrix that results from summing over all powers of
T in the subspace without the first eigenspace.
See Haghverdi et al. (2016).
"""
if self.n_jobs >= 4: # if we have enough cores, skip this step
return # TODO: make sure that this is really the best strategy
logg.m(' try computing "M" matrix using up to 90% of `sett.max_memory`')
if True: # Python version
self.M = sum([self.evals[l]/(1-self.evals[l])
* np.outer(self.rbasis[:, l], self.lbasis[:, l])
for l in range(1, self.evals.size)])
self.M += np.outer(self.rbasis[:, 0], self.lbasis[:, 0])
else: # Cython version
used_memory, _ = logg.get_memory_usage()
memory_for_M = self.X.shape[0]**2 * 23 / 8 / 1e9 # in GB
logg.m(' max memory =', sett.max_memory,
' / used memory = {:.1f}'.format(used_memory),
' / memory_for_M = {:.1f}'.format(memory_for_M))
if used_memory + memory_for_M < 0.9 * sett.max_memory:
logg.m(0, ' allocate memory and compute M matrix')
len_chunk = np.ceil(self.X.shape[0] / self.n_jobs).astype(int)
n_chunks = np.ceil(self.X.shape[0] / len_chunk).astype(int)
chunks = [np.arange(start, min(start + len_chunk, self.X.shape[0]))
for start in range(0, n_chunks * len_chunk, len_chunk)]
# parallel computing does not seem to help
if False: # self.n_jobs > 1:
# here backend threading is not necessary, and seems to slow
# down everything considerably
result_lst = Parallel(n_jobs=self.n_jobs, backend='threading')(
delayed(self._get_M_row_chunk)(chunk)
for chunk in chunks)
self.M = np.zeros((self.X.shape[0], self.X.shape[0]),
dtype=np.float32)
for i_chunk, chunk in enumerate(chunks):
if False: # self.n_jobs > 1:
M_chunk = result_lst[i_chunk]
else:
M_chunk = self._get_M_row_chunk(chunk)
self.M[chunk] = M_chunk
# the following did not work
# filename = sett.writedir + 'tmp.npy'
# np.save(filename, self.M)
# self.M = filename
sett.mt(0, 'finished computation of M')
else:
logg.m('not enough memory to compute M, using "on-the-fly" computation')
def compute_Ddiff_matrix(self):
"""Returns the distance matrix in the Diffusion Pseudotime metric.
See Haghverdi et al. (2016).
Notes
-----
- Is based on M matrix.
- self.Ddiff[self.iroot,:] stores diffusion pseudotime as a vector.
"""
if self.M.shape[0] > 1000:
logg.m('--> high number of dimensions for computing DPT distance matrix\n'
' computing PCA with 50 components')
from ..preprocessing import pca
self.M = pca(self.M, n_comps=50, mute=True)
self.Ddiff = sp.spatial.distance.squareform(sp.spatial.distance.pdist(self.M))
logg.m('computed Ddiff distance matrix', t=True)
self.Dchosen = self.Ddiff
def _get_Ddiff_row_chunk(self, m_i, j_range):
M = self.M # caching with a file on disk did not work
d_i = np.zeros(len(j_range))
for j_cnt, j in enumerate(j_range):
if False: # not much slower, but slower
m_j = self.get_M_row(j)
d_i[j_cnt] = sp.spatial.distance.cdist(m_i[None, :], m_j[None, :])
else:
if M is None:
m_j = utils_cy.get_M_row(j, self.evals, self.rbasis, self.lbasis)
else:
m_j = M[j]
d_i[j_cnt] = utils_cy.c_dist(m_i, m_j)
return d_i
def get_Ddiff_row(self, i):
if not self.sym:
raise ValueError('Not bug-free implemented! '
'Computation needs to be adjusted if sym=False.')
row = sum([(self.evals[l]/(1-self.evals[l])
* (self.rbasis[i, l] - self.lbasis[:, l]))**2
for l in range(0, self.evals.size) if self.evals[l] < 1])
row += sum([(self.rbasis[i, l] - self.lbasis[:, l])**2
for l in range(0, self.evals.size) if self.evals[l] == 1.0])
return np.sqrt(row)
def get_Ddiff_row_deprecated(self, i):
if self.M is None:
m_i = utils_cy.get_M_row(i, self.evals, self.rbasis, self.lbasis)
else:
m_i = self.M[i]
len_chunk = np.ceil(self.X.shape[0] / self.n_jobs).astype(int)
n_chunks = np.ceil(self.X.shape[0] / len_chunk).astype(int)
chunks = [np.arange(start, min(start + len_chunk, self.X.shape[0]))
for start in range(0, n_chunks * len_chunk, len_chunk)]
if self.n_jobs >= 4: # problems with high memory calculations, we skip computing M above
# here backend threading is not necessary, and seems to slow
# down everything considerably
result_lst = Parallel(n_jobs=self.n_jobs)(
delayed(self._get_Ddiff_row_chunk)(m_i, chunk)
for chunk in chunks)
d_i = np.zeros(self.X.shape[0])
for i_chunk, chunk in enumerate(chunks):
if self.n_jobs >= 4: d_i_chunk = result_lst[i_chunk]
else: d_i_chunk = self._get_Ddiff_row_chunk(m_i, chunk)
d_i[chunk] = d_i_chunk
return d_i
def compute_Lp_matrix(self):
"""See Fouss et al. (2006) and von Luxburg et al. (2007).
See Proposition 6 in von Luxburg (2007) and the inline equations
right in the text above.
"""
self.Lp = sum([1/self.evals[i]
* np.outer(self.rbasis[:, i], self.lbasis[:, i])
for i in range(1, self.evals.size)])
sett.mt(0, 'computed pseudoinverse of Laplacian')
def compute_C_matrix(self):
"""See Fouss et al. (2006) and von Luxburg et al. (2007).
This is the commute-time matrix. It's a squared-euclidian distance
matrix in \mathbb{R}^n.
"""
self.C = np.repeat(np.diag(self.Lp)[:, np.newaxis],
self.Lp.shape[0], axis=1)
self.C += np.repeat(np.diag(self.Lp)[np.newaxis, :],
self.Lp.shape[0], axis=0)
self.C -= 2*self.Lp
# the following is much slower
# self.C = np.zeros(self.Lp.shape)
# for i in range(self.Lp.shape[0]):
# for j in range(self.Lp.shape[1]):
# self.C[i, j] = self.Lp[i, i] + self.Lp[j, j] - 2*self.Lp[i, j]
volG = np.sum(self.z)
self.C *= volG
sett.mt(0, 'computed commute distance matrix')
self.Dchosen = self.C
def compute_MFP_matrix(self):
"""See Fouss et al. (2006).
This is the mean-first passage time matrix. It's not a distance.
Mfp[i, k] := m(k|i) in the notation of Fouss et al. (2006). This
corresponds to the standard notation for transition matrices (left index
initial state, right index final state, i.e. a right-stochastic
matrix, with each row summing to one).
"""
self.MFP = np.zeros(self.Lp.shape)
for i in range(self.Lp.shape[0]):
for k in range(self.Lp.shape[1]):
for j in range(self.Lp.shape[1]):
self.MFP[i, k] += (self.Lp[i, j] - self.Lp[i, k]
- self.Lp[k, j] + self.Lp[k, k]) * self.z[j]
sett.mt(0, 'computed mean first passage time matrix')
self.Dchosen = self.MFP
def set_pseudotime(self):
"""Return pseudotime with respect to root point.
"""
self.pseudotime = self.Dchosen[self.iroot].copy()
self.pseudotime /= np.max(self.pseudotime)
def set_root(self, xroot):
"""Determine the index of the root cell.
Given an expression vector, find the observation index that is closest
to this vector.
Parameters
----------
xroot : np.ndarray
Vector that marks the root cell, the vector storing the initial
condition, only relevant for computing pseudotime.
"""
if self.X.shape[1] != xroot.size:
raise ValueError('The root vector you provided does not have the '
'correct dimension. Make sure you provide the dimension-'
'reduced version, if you provided X_pca.')
# this is the squared distance
dsqroot = 1e10
iroot = 0
for i in range(self.X.shape[0]):
diff = self.X[i, :] - xroot
dsq = diff.dot(diff)
if dsq < dsqroot:
dsqroot = dsq
iroot = i
if np.sqrt(dsqroot) < 1e-10: break
logg.m('setting root index to', iroot, v=4)
if self.iroot is not None and iroot != self.iroot:
logg.warn('Changing index of iroot from {} to {}.'.format(self.iroot, iroot))
self.iroot = iroot
return self.iroot
def _test_embed(self):
"""
Checks and tests for embed.
"""
# pl.semilogy(w,'x',label=r'$ \widetilde K$')
# pl.show()
if sett.verbosity > 2:
# output of spectrum of K for comparison
w, v = np.linalg.eigh(self.K)
sett.mi('spectrum of K (kernel)')
if sett.verbosity > 3:
# direct computation of spectrum of T
w, vl, vr = sp.linalg.eig(self.T, left=True)
sett.mi('spectrum of transition matrix (should be same as of Ktilde)')
|
# Author: Alex Wolf (http://falexwolf.de)
# T. Callies
"""Rank genes according to differential expression.
"""
import numpy as np
import pandas as pd
from math import sqrt, floor
from scipy.sparse import issparse
from .. import utils
from .. import settings
from .. import logging as logg
from ..preprocessing import simple
def rank_genes_groups(
adata,
group_by,
use_raw=True,
groups='all',
reference='rest',
n_genes=100,
compute_distribution=False,
only_positive=True,
copy=False,
test_type='t-test_overestim_var',
correction_factors=None):
"""Rank genes according to differential expression [Wolf17]_.
Rank genes by differential expression. By default, a t-test-like ranking is
used, in which means are normalized with variances.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
Annotated data matrix.
group_by : `str`
The key of the sample grouping to consider.
use_raw : `bool`, optional (default: `True`)
Use `raw` attribute of `adata` if present.
groups : `str`, `list`, optional (default: `'all'`)
Subset of groups, e.g. `['g1', 'g2', 'g3']`, to which comparison shall
be restricted. If not passed, a ranking will be generated for all
groups.
reference : `str`, optional (default: `'rest'`)
If `'rest'`, compare each group to the union of the rest of the group. If
a group identifier, compare with respect to this group.
n_genes : `int`, optional (default: 100)
The number of genes that appear in the returned tables.
test_type : {'t-test_overestim_var', 't-test', 'wilcoxon', , 't-test_double_overestim_var',
't-test_correction_factors'}, optional (default: 't-test_overestim_var')
If 't-test', use t-test to calculate test statistics. If 'wilcoxon', use
Wilcoxon-Rank-Sum to calculate test statistic. If
't-test_overestim_var', overestimate variance.
't-test_double_overestim_var', additionally, underestimate variance of the rest
't-test_correction_factors', define correction factors manually
only_positive : bool, optional (default: `True`)
Only consider positive differences.
correction_factors: [a,b], optional (default: None)
Only for the test-type 't-test_correction_factors'. Then, a determines correction factor for group variance,
b determines correction factor for variance of the comparison group
Returns
-------
rank_genes_groups_gene_scores : structured `np.ndarray` (adata.uns)
Structured array to be indexed by group id of shape storing the zscore
for each gene for each group.
rank_genes_groups_gene_names : structured `np.ndarray` (adata.uns)
Structured array to be indexed by group id for storing the gene names.
"""
logg.info('rank differentially expressed genes', r=True)
adata = adata.copy() if copy else adata
utils.sanitize_anndata(adata)
if compute_distribution:
logg.warn('`compute_distribution` is deprecated, as it requires storing'
'a shifted and rescaled disribution for each gene'
'You can now run `sc.pl.rank_genes_groups_violin` without it, '
'which will show the original distribution of the gene.')
# for clarity, rename variable
groups_order = groups
if isinstance(groups_order, list) and isinstance(groups_order[0], int):
groups_order = [str(n) for n in groups_order]
if reference != 'rest' and reference not in set(groups_order):
groups_order += [reference]
if (reference != 'rest'
and reference not in set(adata.obs[group_by].cat.categories)):
raise ValueError('reference = {} needs to be one of group_by = {}.'
.format(reference,
adata.obs[group_by].cat.categories.tolist()))
groups_order, groups_masks = utils.select_groups(
adata, groups_order, group_by)
adata.uns['rank_genes_groups_params'] = np.array(
(group_by, reference, test_type, use_raw),
dtype=[('group_by', 'U50'), ('reference', 'U50'), ('test_type', 'U50'), ('use_raw', np.bool_)])
# adata_comp mocks an AnnData object if use_raw is True
# otherwise it's just the AnnData object
adata_comp = adata
if adata.raw is not None and use_raw:
adata_comp = adata.raw
X = adata_comp.X
# for clarity, rename variable
n_genes_user = n_genes
# make sure indices are not OoB in case there are less genes than n_genes
if n_genes_user > X.shape[1]:
n_genes_user = X.shape[1]
# in the following, n_genes is simply another name for the total number of genes
n_genes = X.shape[1]
rankings_gene_zscores = []
rankings_gene_names = []
n_groups = groups_masks.shape[0]
n_genes = X.shape[1]
ns = np.zeros(n_groups, dtype=int)
for imask, mask in enumerate(groups_masks):
ns[imask] = np.where(mask)[0].size
logg.info(' consider \'{}\':'.format(group_by), groups_order,
'with sample numbers', ns)
if reference != 'rest':
ireference = np.where(groups_order == reference)[0][0]
reference_indices = np.arange(adata_comp.n_vars, dtype=int)
avail_tests = {'t-test', 't-test_overestim_var', 'wilcoxon', 't-test_double_overestim_var',
't-test_correction_factors'}
if test_type not in avail_tests:
raise ValueError('test_type should be one of {}.'
'"t-test_overestim_var" is being used as default.'
.format(avail_tests))
if test_type is 't-test_correction_factors':
if correction_factors is None:
raise ValueError('For this test type, you need to enter correction factors manually.')
if len(correction_factors) != 2:
raise ValueError('We need exactly 2 correction factors, accessible via correction_factors[i], i=0,1')
if correction_factors[0]<0 or correction_factors[1]<0:
raise ValueError('Correction factors need to be positive numbers!')
if test_type in {'t-test', 't-test_overestim_var', 't-test_double_overestim_var',
't-test_correction_factors'}:
# loop over all masks and compute means, variances and sample numbers
means = np.zeros((n_groups, n_genes))
vars = np.zeros((n_groups, n_genes))
for imask, mask in enumerate(groups_masks):
means[imask], vars[imask] = simple._get_mean_var(X[mask])
# test each either against the union of all other groups or against a
# specific group
for igroup in range(n_groups):
if reference == 'rest':
mask_rest = ~groups_masks[igroup]
else:
if igroup == ireference: continue
else: mask_rest = groups_masks[ireference]
mean_rest, var_rest = simple._get_mean_var(X[mask_rest])
if test_type == 't-test':
ns_rest = np.where(mask_rest)[0].size
elif test_type == 't-test_correction_factors':
# The tendency is as follows: For the comparison group (rest), overesimate variance --> smaller ns_rest
ns_rest = np.where(mask_rest)[0].size/correction_factors[1]
else: # hack for overestimating the variance
ns_rest = ns[igroup]
if test_type in {'t-test', 't-test_overestim_var'}:
ns_group=ns[igroup]
elif test_type == 't-test_correction_factors':
# We underestimate group variance by increasing denominator, i.e. ns_group
ns_group=ns[igroup]*correction_factors[0]
else :
# We do the opposite of t-test_overestim_var
ns_group=np.where(mask_rest)[0].size
denominator = np.sqrt(vars[igroup]/ns_group + var_rest/ns_rest)
denominator[np.flatnonzero(denominator == 0)] = np.nan
zscores = (means[igroup] - mean_rest) / denominator
zscores[np.isnan(zscores)] = 0
zscores = zscores if only_positive else np.abs(zscores)
partition = np.argpartition(zscores, -n_genes_user)[-n_genes_user:]
partial_indices = np.argsort(zscores[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_zscores.append(zscores[global_indices])
rankings_gene_names.append(adata_comp.var_names[global_indices])
if compute_distribution:
mask = groups_masks[igroup]
for gene_counter in range(n_genes_user):
gene_idx = global_indices[gene_counter]
X_col = X[mask, gene_idx]
if issparse(X): X_col = X_col.toarray()[:, 0]
identifier = _build_identifier(group_by, groups_order[igroup],
gene_counter, adata_comp.var_names[gene_idx])
full_col = np.empty(adata.n_obs)
full_col[:] = np.nan
full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]
adata.obs[identifier] = full_col
elif test_type == 'wilcoxon':
# Wilcoxon-rank-sum test is usually more powerful in detecting marker genes
# Limit maximal RAM that is required by the calculation. Currently set fixed to roughly 100 MByte
CONST_MAX_SIZE = 10000000
ns_rest = np.zeros(n_groups, dtype=int)
# initialize space for z-scores
zscores = np.zeros(n_genes)
# First loop: Loop over all genes
if reference != 'rest':
for imask, mask in enumerate(groups_masks):
if imask == ireference: continue
else: mask_rest = groups_masks[ireference]
ns_rest[imask] = np.where(mask_rest)[0].size
if ns_rest[imask] <= 25 or ns[imask] <= 25:
logg.hint('Few observations in a group for '
'normal approximation (<=25). Lower test accuracy.')
n_active = ns[imask]
m_active = ns_rest[imask]
# Now calculate gene expression ranking in chunkes:
chunk = []
# Calculate chunk frames
n_genes_max_chunk = floor(CONST_MAX_SIZE / (n_active + m_active))
if n_genes_max_chunk < n_genes - 1:
chunk_index = n_genes_max_chunk
while chunk_index < n_genes - 1:
chunk.append(chunk_index)
chunk_index = chunk_index + n_genes_max_chunk
chunk.append(n_genes - 1)
else:
chunk.append(n_genes - 1)
left = 0
# Calculate rank sums for each chunk for the current mask
for chunk_index, right in enumerate(chunk):
# Check if issparse is true: AnnData objects are currently sparse.csr or ndarray.
if issparse(X):
df1 = pd.DataFrame(data=X[mask, left:right].todense())
df2 = pd.DataFrame(data=X[mask_rest, left:right].todense(),
index=np.arange(start=n_active, stop=n_active + m_active))
else:
df1 = pd.DataFrame(data=X[mask, left:right])
df2 = pd.DataFrame(data=X[mask_rest, left:right],
index=np.arange(start=n_active, stop=n_active + m_active))
df1 = df1.append(df2)
ranks = df1.rank()
# sum up adjusted_ranks to calculate W_m,n
zscores[left:right] = np.sum(ranks.loc[0:n_active, :])
left = right + 1
zscores = (zscores - (n_active * (n_active + m_active + 1) / 2)) / sqrt(
(n_active * m_active * (n_active + m_active + 1) / 12))
zscores = zscores if only_positive else np.abs(zscores)
zscores[np.isnan(zscores)] = 0
partition = np.argpartition(zscores, -n_genes_user)[-n_genes_user:]
partial_indices = np.argsort(zscores[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_zscores.append(zscores[global_indices])
rankings_gene_names.append(adata_comp.var_names[global_indices])
if compute_distribution:
# Add calculation of means, var: (Unnecessary for wilcoxon if compute distribution=False)
mean, vars = simple._get_mean_var(X[mask])
mean_rest, var_rest = simple._get_mean_var(X[mask_rest])
denominator = np.sqrt(vars / ns[imask] + var_rest / ns_rest[imask])
denominator[np.flatnonzero(denominator == 0)] = np.nan
for gene_counter in range(n_genes_user):
gene_idx = global_indices[gene_counter]
X_col = X[mask, gene_idx]
if issparse(X): X_col = X_col.toarray()[:, 0]
identifier = _build_identifier(group_by, groups_order[imask],
gene_counter, adata_comp.var_names[gene_idx])
full_col = np.empty(adata.n_obs)
full_col[:] = np.nan
full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]
adata.obs[identifier] = full_col
# If no reference group exists, ranking needs only to be done once (full mask)
else:
zscores = np.zeros((n_groups, n_genes))
chunk = []
n_cells = X.shape[0]
n_genes_max_chunk = floor(CONST_MAX_SIZE / n_cells)
if n_genes_max_chunk < n_genes - 1:
chunk_index = n_genes_max_chunk
while chunk_index < n_genes - 1:
chunk.append(chunk_index)
chunk_index = chunk_index + n_genes_max_chunk
chunk.append(n_genes - 1)
else:
chunk.append(n_genes - 1)
left = 0
for chunk_index, right in enumerate(chunk):
# Check if issparse is true
if issparse(X):
df1 = pd.DataFrame(data=X[:, left:right].todense())
else:
df1 = pd.DataFrame(data=X[:, left:right])
ranks = df1.rank()
# sum up adjusted_ranks to calculate W_m,n
for imask, mask in enumerate(groups_masks):
zscores[imask, left:right] = np.sum(ranks.loc[mask, :])
left = right + 1
for imask, mask in enumerate(groups_masks):
zscores[imask, :] = (zscores[imask, :] - (ns[imask] * (n_cells + 1) / 2)) / sqrt(
(ns[imask] * (n_cells - ns[imask]) * (n_cells + 1) / 12))
zscores = zscores if only_positive else np.abs(zscores)
zscores[np.isnan(zscores)] = 0
partition = np.argpartition(zscores[imask, :], -n_genes_user)[-n_genes_user:]
partial_indices = np.argsort(zscores[imask, partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_zscores.append(zscores[imask, global_indices])
rankings_gene_names.append(adata_comp.var_names[global_indices])
if compute_distribution:
mean, vars = simple._get_mean_var(X[mask])
mean_rest, var_rest = simple._get_mean_var(X[~mask])
denominator = np.sqrt(vars / ns[imask] + var_rest / (n_cells-ns[imask]))
denominator[np.flatnonzero(denominator == 0)] = np.nan
for gene_counter in range(n_genes_user):
gene_idx = global_indices[gene_counter]
X_col = X[mask, gene_idx]
if issparse(X): X_col = X_col.toarray()[:, 0]
identifier = _build_identifier(group_by, groups_order[imask],
gene_counter, adata_comp.var_names[gene_idx])
full_col = np.empty(adata.n_obs)
full_col[:] = np.nan
full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]
adata.obs[identifier] = full_col
groups_order_save = [str(g) for g in groups_order]
if reference != 'rest':
groups_order_save = [g for g in groups_order if g != reference]
adata.uns['rank_genes_groups_gene_scores'] = np.rec.fromarrays(
[n for n in rankings_gene_zscores],
dtype=[(rn, 'float32') for rn in groups_order_save])
adata.uns['rank_genes_groups_gene_names'] = np.rec.fromarrays(
[n for n in rankings_gene_names],
dtype=[(rn, 'U50') for rn in groups_order_save])
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint('added\n'
' \'rank_genes_groups_gene_names\', np.recarray to be indexed by group ids (adata.uns)\n'
' \'rank_genes_groups_gene_scores\', np.recarray to be indexed by group ids (adata.uns)')
return adata if copy else None
def _build_identifier(group_by, name, gene_counter, gene_name):
return 'rank_genes_{}_{}_{}_{}'.format(
group_by, name, gene_counter, gene_name)
Erase double line from merge process
# Author: Alex Wolf (http://falexwolf.de)
# T. Callies
"""Rank genes according to differential expression.
"""
import numpy as np
import pandas as pd
from math import sqrt, floor
from scipy.sparse import issparse
from .. import utils
from .. import settings
from .. import logging as logg
from ..preprocessing import simple
def rank_genes_groups(
adata,
group_by,
use_raw=True,
groups='all',
reference='rest',
n_genes=100,
compute_distribution=False,
only_positive=True,
copy=False,
test_type='t-test_overestim_var',
correction_factors=None):
"""Rank genes according to differential expression [Wolf17]_.
Rank genes by differential expression. By default, a t-test-like ranking is
used, in which means are normalized with variances.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
Annotated data matrix.
group_by : `str`
The key of the sample grouping to consider.
use_raw : `bool`, optional (default: `True`)
Use `raw` attribute of `adata` if present.
groups : `str`, `list`, optional (default: `'all'`)
Subset of groups, e.g. `['g1', 'g2', 'g3']`, to which comparison shall
be restricted. If not passed, a ranking will be generated for all
groups.
reference : `str`, optional (default: `'rest'`)
If `'rest'`, compare each group to the union of the rest of the group. If
a group identifier, compare with respect to this group.
n_genes : `int`, optional (default: 100)
The number of genes that appear in the returned tables.
test_type : {'t-test_overestim_var', 't-test', 'wilcoxon', , 't-test_double_overestim_var',
't-test_correction_factors'}, optional (default: 't-test_overestim_var')
If 't-test', use t-test to calculate test statistics. If 'wilcoxon', use
Wilcoxon-Rank-Sum to calculate test statistic. If
't-test_overestim_var', overestimate variance.
't-test_double_overestim_var', additionally, underestimate variance of the rest
't-test_correction_factors', define correction factors manually
only_positive : bool, optional (default: `True`)
Only consider positive differences.
correction_factors: [a,b], optional (default: None)
Only for the test-type 't-test_correction_factors'. Then, a determines correction factor for group variance,
b determines correction factor for variance of the comparison group
Returns
-------
rank_genes_groups_gene_scores : structured `np.ndarray` (adata.uns)
Structured array to be indexed by group id of shape storing the zscore
for each gene for each group.
rank_genes_groups_gene_names : structured `np.ndarray` (adata.uns)
Structured array to be indexed by group id for storing the gene names.
"""
logg.info('rank differentially expressed genes', r=True)
adata = adata.copy() if copy else adata
utils.sanitize_anndata(adata)
if compute_distribution:
logg.warn('`compute_distribution` is deprecated, as it requires storing'
'a shifted and rescaled disribution for each gene'
'You can now run `sc.pl.rank_genes_groups_violin` without it, '
'which will show the original distribution of the gene.')
# for clarity, rename variable
groups_order = groups
if isinstance(groups_order, list) and isinstance(groups_order[0], int):
groups_order = [str(n) for n in groups_order]
if reference != 'rest' and reference not in set(groups_order):
groups_order += [reference]
if (reference != 'rest'
and reference not in set(adata.obs[group_by].cat.categories)):
raise ValueError('reference = {} needs to be one of group_by = {}.'
.format(reference,
adata.obs[group_by].cat.categories.tolist()))
groups_order, groups_masks = utils.select_groups(
adata, groups_order, group_by)
adata.uns['rank_genes_groups_params'] = np.array(
(group_by, reference, test_type, use_raw),
dtype=[('group_by', 'U50'), ('reference', 'U50'), ('test_type', 'U50'), ('use_raw', np.bool_)])
# adata_comp mocks an AnnData object if use_raw is True
# otherwise it's just the AnnData object
adata_comp = adata
if adata.raw is not None and use_raw:
adata_comp = adata.raw
X = adata_comp.X
# for clarity, rename variable
n_genes_user = n_genes
# make sure indices are not OoB in case there are less genes than n_genes
if n_genes_user > X.shape[1]:
n_genes_user = X.shape[1]
# in the following, n_genes is simply another name for the total number of genes
n_genes = X.shape[1]
rankings_gene_zscores = []
rankings_gene_names = []
n_groups = groups_masks.shape[0]
ns = np.zeros(n_groups, dtype=int)
for imask, mask in enumerate(groups_masks):
ns[imask] = np.where(mask)[0].size
logg.info(' consider \'{}\':'.format(group_by), groups_order,
'with sample numbers', ns)
if reference != 'rest':
ireference = np.where(groups_order == reference)[0][0]
reference_indices = np.arange(adata_comp.n_vars, dtype=int)
avail_tests = {'t-test', 't-test_overestim_var', 'wilcoxon', 't-test_double_overestim_var',
't-test_correction_factors'}
if test_type not in avail_tests:
raise ValueError('test_type should be one of {}.'
'"t-test_overestim_var" is being used as default.'
.format(avail_tests))
if test_type is 't-test_correction_factors':
if correction_factors is None:
raise ValueError('For this test type, you need to enter correction factors manually.')
if len(correction_factors) != 2:
raise ValueError('We need exactly 2 correction factors, accessible via correction_factors[i], i=0,1')
if correction_factors[0]<0 or correction_factors[1]<0:
raise ValueError('Correction factors need to be positive numbers!')
if test_type in {'t-test', 't-test_overestim_var', 't-test_double_overestim_var',
't-test_correction_factors'}:
# loop over all masks and compute means, variances and sample numbers
means = np.zeros((n_groups, n_genes))
vars = np.zeros((n_groups, n_genes))
for imask, mask in enumerate(groups_masks):
means[imask], vars[imask] = simple._get_mean_var(X[mask])
# test each either against the union of all other groups or against a
# specific group
for igroup in range(n_groups):
if reference == 'rest':
mask_rest = ~groups_masks[igroup]
else:
if igroup == ireference: continue
else: mask_rest = groups_masks[ireference]
mean_rest, var_rest = simple._get_mean_var(X[mask_rest])
if test_type == 't-test':
ns_rest = np.where(mask_rest)[0].size
elif test_type == 't-test_correction_factors':
# The tendency is as follows: For the comparison group (rest), overesimate variance --> smaller ns_rest
ns_rest = np.where(mask_rest)[0].size/correction_factors[1]
else: # hack for overestimating the variance
ns_rest = ns[igroup]
if test_type in {'t-test', 't-test_overestim_var'}:
ns_group=ns[igroup]
elif test_type == 't-test_correction_factors':
# We underestimate group variance by increasing denominator, i.e. ns_group
ns_group=ns[igroup]*correction_factors[0]
else :
# We do the opposite of t-test_overestim_var
ns_group=np.where(mask_rest)[0].size
denominator = np.sqrt(vars[igroup]/ns_group + var_rest/ns_rest)
denominator[np.flatnonzero(denominator == 0)] = np.nan
zscores = (means[igroup] - mean_rest) / denominator
zscores[np.isnan(zscores)] = 0
zscores = zscores if only_positive else np.abs(zscores)
partition = np.argpartition(zscores, -n_genes_user)[-n_genes_user:]
partial_indices = np.argsort(zscores[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_zscores.append(zscores[global_indices])
rankings_gene_names.append(adata_comp.var_names[global_indices])
if compute_distribution:
mask = groups_masks[igroup]
for gene_counter in range(n_genes_user):
gene_idx = global_indices[gene_counter]
X_col = X[mask, gene_idx]
if issparse(X): X_col = X_col.toarray()[:, 0]
identifier = _build_identifier(group_by, groups_order[igroup],
gene_counter, adata_comp.var_names[gene_idx])
full_col = np.empty(adata.n_obs)
full_col[:] = np.nan
full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]
adata.obs[identifier] = full_col
elif test_type == 'wilcoxon':
# Wilcoxon-rank-sum test is usually more powerful in detecting marker genes
# Limit maximal RAM that is required by the calculation. Currently set fixed to roughly 100 MByte
CONST_MAX_SIZE = 10000000
ns_rest = np.zeros(n_groups, dtype=int)
# initialize space for z-scores
zscores = np.zeros(n_genes)
# First loop: Loop over all genes
if reference != 'rest':
for imask, mask in enumerate(groups_masks):
if imask == ireference: continue
else: mask_rest = groups_masks[ireference]
ns_rest[imask] = np.where(mask_rest)[0].size
if ns_rest[imask] <= 25 or ns[imask] <= 25:
logg.hint('Few observations in a group for '
'normal approximation (<=25). Lower test accuracy.')
n_active = ns[imask]
m_active = ns_rest[imask]
# Now calculate gene expression ranking in chunkes:
chunk = []
# Calculate chunk frames
n_genes_max_chunk = floor(CONST_MAX_SIZE / (n_active + m_active))
if n_genes_max_chunk < n_genes - 1:
chunk_index = n_genes_max_chunk
while chunk_index < n_genes - 1:
chunk.append(chunk_index)
chunk_index = chunk_index + n_genes_max_chunk
chunk.append(n_genes - 1)
else:
chunk.append(n_genes - 1)
left = 0
# Calculate rank sums for each chunk for the current mask
for chunk_index, right in enumerate(chunk):
# Check if issparse is true: AnnData objects are currently sparse.csr or ndarray.
if issparse(X):
df1 = pd.DataFrame(data=X[mask, left:right].todense())
df2 = pd.DataFrame(data=X[mask_rest, left:right].todense(),
index=np.arange(start=n_active, stop=n_active + m_active))
else:
df1 = pd.DataFrame(data=X[mask, left:right])
df2 = pd.DataFrame(data=X[mask_rest, left:right],
index=np.arange(start=n_active, stop=n_active + m_active))
df1 = df1.append(df2)
ranks = df1.rank()
# sum up adjusted_ranks to calculate W_m,n
zscores[left:right] = np.sum(ranks.loc[0:n_active, :])
left = right + 1
zscores = (zscores - (n_active * (n_active + m_active + 1) / 2)) / sqrt(
(n_active * m_active * (n_active + m_active + 1) / 12))
zscores = zscores if only_positive else np.abs(zscores)
zscores[np.isnan(zscores)] = 0
partition = np.argpartition(zscores, -n_genes_user)[-n_genes_user:]
partial_indices = np.argsort(zscores[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_zscores.append(zscores[global_indices])
rankings_gene_names.append(adata_comp.var_names[global_indices])
if compute_distribution:
# Add calculation of means, var: (Unnecessary for wilcoxon if compute distribution=False)
mean, vars = simple._get_mean_var(X[mask])
mean_rest, var_rest = simple._get_mean_var(X[mask_rest])
denominator = np.sqrt(vars / ns[imask] + var_rest / ns_rest[imask])
denominator[np.flatnonzero(denominator == 0)] = np.nan
for gene_counter in range(n_genes_user):
gene_idx = global_indices[gene_counter]
X_col = X[mask, gene_idx]
if issparse(X): X_col = X_col.toarray()[:, 0]
identifier = _build_identifier(group_by, groups_order[imask],
gene_counter, adata_comp.var_names[gene_idx])
full_col = np.empty(adata.n_obs)
full_col[:] = np.nan
full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]
adata.obs[identifier] = full_col
# If no reference group exists, ranking needs only to be done once (full mask)
else:
zscores = np.zeros((n_groups, n_genes))
chunk = []
n_cells = X.shape[0]
n_genes_max_chunk = floor(CONST_MAX_SIZE / n_cells)
if n_genes_max_chunk < n_genes - 1:
chunk_index = n_genes_max_chunk
while chunk_index < n_genes - 1:
chunk.append(chunk_index)
chunk_index = chunk_index + n_genes_max_chunk
chunk.append(n_genes - 1)
else:
chunk.append(n_genes - 1)
left = 0
for chunk_index, right in enumerate(chunk):
# Check if issparse is true
if issparse(X):
df1 = pd.DataFrame(data=X[:, left:right].todense())
else:
df1 = pd.DataFrame(data=X[:, left:right])
ranks = df1.rank()
# sum up adjusted_ranks to calculate W_m,n
for imask, mask in enumerate(groups_masks):
zscores[imask, left:right] = np.sum(ranks.loc[mask, :])
left = right + 1
for imask, mask in enumerate(groups_masks):
zscores[imask, :] = (zscores[imask, :] - (ns[imask] * (n_cells + 1) / 2)) / sqrt(
(ns[imask] * (n_cells - ns[imask]) * (n_cells + 1) / 12))
zscores = zscores if only_positive else np.abs(zscores)
zscores[np.isnan(zscores)] = 0
partition = np.argpartition(zscores[imask, :], -n_genes_user)[-n_genes_user:]
partial_indices = np.argsort(zscores[imask, partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_zscores.append(zscores[imask, global_indices])
rankings_gene_names.append(adata_comp.var_names[global_indices])
if compute_distribution:
mean, vars = simple._get_mean_var(X[mask])
mean_rest, var_rest = simple._get_mean_var(X[~mask])
denominator = np.sqrt(vars / ns[imask] + var_rest / (n_cells-ns[imask]))
denominator[np.flatnonzero(denominator == 0)] = np.nan
for gene_counter in range(n_genes_user):
gene_idx = global_indices[gene_counter]
X_col = X[mask, gene_idx]
if issparse(X): X_col = X_col.toarray()[:, 0]
identifier = _build_identifier(group_by, groups_order[imask],
gene_counter, adata_comp.var_names[gene_idx])
full_col = np.empty(adata.n_obs)
full_col[:] = np.nan
full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]
adata.obs[identifier] = full_col
groups_order_save = [str(g) for g in groups_order]
if reference != 'rest':
groups_order_save = [g for g in groups_order if g != reference]
adata.uns['rank_genes_groups_gene_scores'] = np.rec.fromarrays(
[n for n in rankings_gene_zscores],
dtype=[(rn, 'float32') for rn in groups_order_save])
adata.uns['rank_genes_groups_gene_names'] = np.rec.fromarrays(
[n for n in rankings_gene_names],
dtype=[(rn, 'U50') for rn in groups_order_save])
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint('added\n'
' \'rank_genes_groups_gene_names\', np.recarray to be indexed by group ids (adata.uns)\n'
' \'rank_genes_groups_gene_scores\', np.recarray to be indexed by group ids (adata.uns)')
return adata if copy else None
def _build_identifier(group_by, name, gene_counter, gene_name):
return 'rank_genes_{}_{}_{}_{}'.format(
group_by, name, gene_counter, gene_name)
|
import numpy as np
from scipy.fftpack import fft, ifft
from scikits.talkbox.linpred import lpc
def periodogram(x, nfft=None, fs=1):
"""Compute the periodogram of the given signal, with the given fft size.
Parameters
----------
x : array-like
input signal
nfft : int
size of the fft to compute the periodogram. If None (default), the
length of the signal is used. if nfft > n, the signal is 0 padded.
fs : float
Sampling rate. By default, is 1 (normalized frequency. e.g. 0.5 is the
Nyquist limit).
Returns
-------
pxx : array-like
The psd estimate.
fgrid : array-like
Frequency grid over which the periodogram was estimated.
Examples
--------
Generate a signal with two sinusoids, and compute its periodogram:
>>> fs = 1000
>>> x = np.sin(2 * np.pi * 0.1 * fs * np.linspace(0, 0.5, 0.5*fs))
>>> x += np.sin(2 * np.pi * 0.2 * fs * np.linspace(0, 0.5, 0.5*fs))
>>> px, fx = periodogram(x, 512, fs)
Notes
-----
Only real signals supported for now.
Returns the one-sided version of the periodogram.
Discrepency with matlab: matlab compute the psd in unit of power / radian /
sample, and we compute the psd in unit of power / sample: to get the same
result as matlab, just multiply the result from talkbox by 2pi"""
# TODO: this is basic to the point of being useless:
# - support Daniel smoothing
# - support windowing
# - trend/mean handling
# - one-sided vs two-sided
# - plot
# - support complex input
x = np.atleast_1d(x)
n = x.size
if x.ndim > 1:
raise ValueError("Only rank 1 input supported for now.")
if not np.isrealobj(x):
raise ValueError("Only real input supported for now.")
if not nfft:
nfft = n
if nfft < n:
raise ValueError("nfft < signal size not supported yet")
pxx = np.abs(fft(x, nfft)) ** 2
if nfft % 2 == 0:
pn = nfft / 2 + 1
else:
pn = (nfft + 1 )/ 2
fgrid = np.linspace(0, fs * 0.5, pn)
return pxx[:pn] / (n * fs), fgrid
def arspec(x, order, nfft=None, fs=1):
"""Compute the spectral density using an AR model.
An AR model of the signal is estimated through the Yule-Walker equations;
the estimated AR coefficient are then used to compute the spectrum, which
can be computed explicitely for AR models.
Parameters
----------
x : array-like
input signal
order : int
Order of the LPC computation.
nfft : int
size of the fft to compute the periodogram. If None (default), the
length of the signal is used. if nfft > n, the signal is 0 padded.
fs : float
Sampling rate. By default, is 1 (normalized frequency. e.g. 0.5 is the
Nyquist limit).
Returns
-------
pxx : array-like
The psd estimate.
fgrid : array-like
Frequency grid over which the periodogram was estimated.
"""
x = np.atleast_1d(x)
n = x.size
if x.ndim > 1:
raise ValueError("Only rank 1 input supported for now.")
if not np.isrealobj(x):
raise ValueError("Only real input supported for now.")
if not nfft:
nfft = n
if nfft < n:
raise ValueError("nfft < signal size not supported yet")
a, e, k = lpc(x, order)
# This is not enough to deal correctly with even/odd size
if nfft % 2 == 0:
pn = nfft / 2 + 1
else:
pn = (nfft + 1 )/ 2
px = 1 / np.fft.fft(a, nfft)[:pn]
pxx = np.real(np.conj(px) * px)
pxx /= fs / e
fx = np.linspace(0, fs * 0.5, pxx.size)
return pxx, fx
Add split cosine taper for periodogram.
import numpy as np
from scipy.fftpack import fft, ifft
from scikits.talkbox.linpred import lpc
def periodogram(x, nfft=None, fs=1):
"""Compute the periodogram of the given signal, with the given fft size.
Parameters
----------
x : array-like
input signal
nfft : int
size of the fft to compute the periodogram. If None (default), the
length of the signal is used. if nfft > n, the signal is 0 padded.
fs : float
Sampling rate. By default, is 1 (normalized frequency. e.g. 0.5 is the
Nyquist limit).
Returns
-------
pxx : array-like
The psd estimate.
fgrid : array-like
Frequency grid over which the periodogram was estimated.
Examples
--------
Generate a signal with two sinusoids, and compute its periodogram:
>>> fs = 1000
>>> x = np.sin(2 * np.pi * 0.1 * fs * np.linspace(0, 0.5, 0.5*fs))
>>> x += np.sin(2 * np.pi * 0.2 * fs * np.linspace(0, 0.5, 0.5*fs))
>>> px, fx = periodogram(x, 512, fs)
Notes
-----
Only real signals supported for now.
Returns the one-sided version of the periodogram.
Discrepency with matlab: matlab compute the psd in unit of power / radian /
sample, and we compute the psd in unit of power / sample: to get the same
result as matlab, just multiply the result from talkbox by 2pi"""
# TODO: this is basic to the point of being useless:
# - support Daniel smoothing
# - support windowing
# - trend/mean handling
# - one-sided vs two-sided
# - plot
# - support complex input
x = np.atleast_1d(x)
n = x.size
if x.ndim > 1:
raise ValueError("Only rank 1 input supported for now.")
if not np.isrealobj(x):
raise ValueError("Only real input supported for now.")
if not nfft:
nfft = n
if nfft < n:
raise ValueError("nfft < signal size not supported yet")
pxx = np.abs(fft(x, nfft)) ** 2
if nfft % 2 == 0:
pn = nfft / 2 + 1
else:
pn = (nfft + 1 )/ 2
fgrid = np.linspace(0, fs * 0.5, pn)
return pxx[:pn] / (n * fs), fgrid
def arspec(x, order, nfft=None, fs=1):
"""Compute the spectral density using an AR model.
An AR model of the signal is estimated through the Yule-Walker equations;
the estimated AR coefficient are then used to compute the spectrum, which
can be computed explicitely for AR models.
Parameters
----------
x : array-like
input signal
order : int
Order of the LPC computation.
nfft : int
size of the fft to compute the periodogram. If None (default), the
length of the signal is used. if nfft > n, the signal is 0 padded.
fs : float
Sampling rate. By default, is 1 (normalized frequency. e.g. 0.5 is the
Nyquist limit).
Returns
-------
pxx : array-like
The psd estimate.
fgrid : array-like
Frequency grid over which the periodogram was estimated.
"""
x = np.atleast_1d(x)
n = x.size
if x.ndim > 1:
raise ValueError("Only rank 1 input supported for now.")
if not np.isrealobj(x):
raise ValueError("Only real input supported for now.")
if not nfft:
nfft = n
if nfft < n:
raise ValueError("nfft < signal size not supported yet")
a, e, k = lpc(x, order)
# This is not enough to deal correctly with even/odd size
if nfft % 2 == 0:
pn = nfft / 2 + 1
else:
pn = (nfft + 1 )/ 2
px = 1 / np.fft.fft(a, nfft)[:pn]
pxx = np.real(np.conj(px) * px)
pxx /= fs / e
fx = np.linspace(0, fs * 0.5, pxx.size)
return pxx, fx
def taper(n, p=0.1):
"""Return a split cosine bell taper (or window)
Parameters
----------
n: int
number of samples of the taper
p: float
proportion of taper (0 <= p <= 1.)
Note
----
p represents the proportion of tapered (or "smoothed") data compared to a
boxcar.
"""
if p > 1. or p < 0:
raise ValueError("taper proportion should be betwen 0 and 1 (was %f)"
% p)
w = np.ones(n)
ntp = np.floor(0.5 * n * p)
w[:ntp] = 0.5 * (1 - np.cos(np.pi * 2 * np.linspace(0, 0.5, ntp)))
w[-ntp:] = 0.5 * (1 - np.cos(np.pi * 2 * np.linspace(0.5, 0, ntp)))
return w
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# ezIBpy: Pythonic Wrapper for IbPy
# https://github.com/ranaroussi/ezibpy
#
# Copyright 2015 Ran Aroussi
#
# Licensed under the GNU Lesser General Public License, v3.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.gnu.org/licenses/lgpl-3.0.en.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from datetime import datetime
from pandas import DataFrame
from ib.opt import Connection
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from ezibpy.utils import dataTypes
# =============================================================
# set debugging mode
# levels: DEBUG, INFO, WARNING, ERROR, CRITICAL
# filename=LOG_FILENAME
# =============================================================
import logging
# import sys
# logging.basicConfig(stream=sys.stdout, level=self.log(mode="debug", msg=
# format='%(asctime)s [%(levelname)s]: %(message)s')
class ezIBpy():
def log(self, mode, msg):
if self.logging:
if mode == "debug":
logging.debug(msg)
elif mode == "info":
logging.info(msg)
elif mode == "warning":
logging.warning(msg)
elif mode == "error":
logging.error(msg)
elif mode == "critical":
logging.critical(msg)
def roundClosestValid(self, val, res, decimals=2):
""" round to closest resolution """
return round(round(val / res)*res, decimals)
"""
https://www.interactivebrokers.com/en/software/api/apiguide/java/java_eclientsocket_methods.htm
"""
# ---------------------------------------------------------
def __init__(self):
self.__version__ = 0.09
self.logging = False
self.clientId = 1
self.port = 4001 # 7496/7497 = TWS, 4001 = IBGateway
self.host = "localhost"
self.ibConn = None
self.time = 0
self.commission = 0
self.accountCode = 0
self.orderId = 1
# auto-construct for every contract/order
self.tickerIds = { 0: "SYMBOL" }
self.contracts = {}
self.orders = {}
self.symbol_orders = {}
self.account = {}
self.positions = {}
self.portfolio = {}
# holds market data
tickDF = DataFrame({
"datetime":[0], "bid":[0], "bidsize":[0],
"ask":[0], "asksize":[0], "last":[0], "lastsize":[0]
})
tickDF.set_index('datetime', inplace=True)
self.marketData = { 0: tickDF } # idx = tickerId
# holds orderbook data
l2DF = DataFrame(index=range(5), data={
"bid":0, "bidsize":0,
"ask":0, "asksize":0
})
self.marketDepthData = { 0: l2DF } # idx = tickerId
# trailing stops
self.trailingStops = {}
# "tickerId" = {
# orderId: ...
# lastPrice: ...
# trailPercent: ...
# trailAmount: ...
# quantity: ...
# }
# triggerable trailing stops
self.triggerableTrailingStops = {}
# "tickerId" = {
# parentId: ...
# stopOrderId: ...
# triggerPrice: ...
# trailPercent: ...
# trailAmount: ...
# quantity: ...
# }
# @TODO - options data
# optionDF = DataFrame({ "datetime":[0], "bid":[0], "ask":[0], "last":[0], "impliedVol":[0], "delta":[0], "optPrice":[0], "pvDividend":[0], "gamma":[0], "vega":[0], "theta":[0], "undPrice":[0] })
# optionDF.set_index('datetime', inplace=True)
# optionDF = {
# "bid": DataFrame({ "impliedVol":[0], "delta":[0], "optPrice":[0], "pvDividend":[0], "gamma":[0], "vega":[0], "theta":[0], "undPrice":[0] }),
# "ask": DataFrame({ "impliedVol":[0], "delta":[0], "optPrice":[0], "pvDividend":[0], "gamma":[0], "vega":[0], "theta":[0], "undPrice":[0] }),
# "last": DataFrame({ "impliedVol":[0], "delta":[0], "optPrice":[0], "pvDividend":[0], "gamma":[0], "vega":[0], "theta":[0], "undPrice":[0] }),
# "model": DataFrame({ "impliedVol":[0], "delta":[0], "optPrice":[0], "pvDividend":[0], "gamma":[0], "vega":[0], "theta":[0], "undPrice":[0] })
# }
# self.optionsData = { 0: optionDF }
# historical data contrainer
self.historicalData = { } # idx = symbol
# ---------------------------------------------------------
def connect(self, clientId=0, host="localhost", port=4001):
""" Establish connection to TWS/IBGW """
self.clientId = clientId
self.host = host
self.port = port
self.ibConn = Connection.create(
host = self.host,
port = self.port,
clientId = self.clientId
)
# Assign error handling function.
self.ibConn.register(self.handleErrorEvents, 'Error')
# Assign server messages handling function.
self.ibConn.registerAll(self.handleServerEvents)
# connect
self.log(mode="info", msg="[CONNECTING TO IB]")
self.ibConn.connect()
# get server time
self.getServerTime()
# subscribe to position and account changes
self.subscribePositions = False
self.requestPositionUpdates(subscribe=True)
self.subscribeAccount = False
self.requestAccountUpdates(subscribe=True)
# ---------------------------------------------------------
def disconnect(self):
""" Disconnect from TWS/IBGW """
if self.ibConn is not None:
self.log(mode="info", msg="[DISCONNECT TO IB]")
self.ibConn.disconnect()
# ---------------------------------------------------------
def getServerTime(self):
""" get the current time on IB """
self.ibConn.reqCurrentTime()
# ---------------------------------------------------------
# Start event handlers
# ---------------------------------------------------------
def handleErrorEvents(self, msg):
""" logs error messages """
# https://www.interactivebrokers.com/en/software/api/apiguide/tables/api_message_codes.htm
if msg.errorCode != -1: # and msg.errorCode != 2104 and msg.errorCode != 2106:
self.log(mode="error", msg=msg)
# ---------------------------------------------------------
def handleServerEvents(self, msg):
""" dispatch msg to the right handler """
if msg.typeName == "error":
self.log(mode="error", msg="[IB ERROR] "+str(msg))
elif msg.typeName == dataTypes["MSG_CURRENT_TIME"]:
if self.time < msg.time:
self.time = msg.time
elif (msg.typeName == dataTypes["MSG_TYPE_MKT_DEPTH"] or
msg.typeName == dataTypes["MSG_TYPE_MKT_DEPTH_L2"]):
self.handleMarketDepth(msg)
elif msg.typeName == dataTypes["MSG_TYPE_TICK_STRING"]:
self.handleTickString(msg)
elif msg.typeName == dataTypes["MSG_TYPE_TICK_PRICE"]:
self.handleTickPrice(msg)
elif msg.typeName == dataTypes["MSG_TYPE_TICK_SIZE"]:
self.handleTickSize(msg)
elif msg.typeName == dataTypes["MSG_TYPE_TICK_OPTION"]:
self.handleTickOptionComputation(msg)
elif (msg.typeName == dataTypes["MSG_TYPE_OPEN_ORDER"] or
msg.typeName == dataTypes["MSG_TYPE_ORDER_STATUS"]):
self.handleOrders(msg)
elif msg.typeName == dataTypes["MSG_TYPE_HISTORICAL_DATA"]:
self.handleHistoricalData(msg)
elif msg.typeName == dataTypes["MSG_TYPE_ACCOUNT_UPDATES"]:
self.handleAccount(msg)
elif msg.typeName == dataTypes["MSG_TYPE_PORTFOLIO_UPDATES"]:
self.handlePortfolio(msg)
elif msg.typeName == dataTypes["MSG_TYPE_POSITION"]:
self.handlePosition(msg)
elif msg.typeName == dataTypes["MSG_TYPE_MANAGED_ACCOUNTS"]:
self.accountCode = msg.accountsList
elif msg.typeName == dataTypes["MSG_TYPE_NEXT_ORDER_ID"]:
self.orderId = msg.orderId
elif msg.typeName == dataTypes["MSG_COMMISSION_REPORT"]:
self.commission = msg.commissionReport.m_commission
else:
self.log(mode="info", msg="[SERVER]: "+ str(msg))
pass
# ---------------------------------------------------------
# generic callback function - can be used externally
# ---------------------------------------------------------
def ibCallback(self, caller, msg, **kwargs):
pass
# ---------------------------------------------------------
# Start admin handlers
# ---------------------------------------------------------
def handleAccount(self, msg):
"""
handle account info update
https://www.interactivebrokers.com/en/software/api/apiguide/java/updateaccountvalue.htm
"""
track = ["BuyingPower", "CashBalance", "DayTradesRemaining",
"NetLiquidation", "InitMarginReq", "MaintMarginReq",
"AvailableFunds", "AvailableFunds-C", "AvailableFunds-S"]
if msg.key in track:
# self.log(mode="info", msg="[ACCOUNT]: " + str(msg))
self.account[msg.key] = float(msg.value)
# fire callback
self.ibCallback(caller="handleAccount", msg=msg)
# ---------------------------------------------------------
def handlePosition(self, msg):
""" handle positions changes """
# contract identifier
contractString = self.contractString(msg.contract)
# if msg.pos != 0 or contractString in self.contracts.keys():
self.log(mode="info", msg="[POSITION]: " + str(msg))
self.positions[contractString] = {
"symbol": contractString,
"position": int(msg.pos),
"avgCost": float(msg.avgCost),
"account": msg.account
}
# fire callback
self.ibCallback(caller="handlePosition", msg=msg)
# ---------------------------------------------------------
def handlePortfolio(self, msg):
""" handle portfolio updates """
self.log(mode="info", msg="[PORTFOLIO]: " + str(msg))
# contract identifier
contractString = self.contractString(msg.contract)
self.portfolio[contractString] = {
"symbol": contractString,
"position": int(msg.position),
"marketPrice": float(msg.marketPrice),
"marketValue": float(msg.marketValue),
"averageCost": float(msg.averageCost),
"unrealizedPNL": float(msg.unrealizedPNL),
"realizedPNL": float(msg.realizedPNL),
"account": msg.accountName
}
# fire callback
self.ibCallback(caller="handlePortfolio", msg=msg)
# ---------------------------------------------------------
def handleOrders(self, msg):
""" handle order open & status """
"""
It is possible that orderStatus() may return duplicate messages.
It is essential that you filter the message accordingly.
"""
self.log(mode="info", msg="[ORDER]: " + str(msg))
# get server time
self.getServerTime()
time.sleep(0.001)
# we need to handle mutiple events for the same order status
duplicateMessage = False;
# open order
if msg.typeName == dataTypes["MSG_TYPE_OPEN_ORDER"]:
# contract identifier
contractString = self.contractString(msg.contract)
if msg.orderId in self.orders:
duplicateMessage = True
else:
self.orders[msg.orderId] = {
"id": msg.orderId,
"symbol": contractString,
"contract": msg.contract,
"status": "OPENED",
"reason": None,
"avgFillPrice": 0.,
"parentId": 0,
"time": datetime.fromtimestamp(int(self.time))
}
# order status
elif msg.typeName == dataTypes["MSG_TYPE_ORDER_STATUS"]:
if msg.orderId in self.orders and self.orders[msg.orderId]['status'] == msg.status.upper():
duplicateMessage = True
else:
self.orders[msg.orderId]['status'] = msg.status.upper()
self.orders[msg.orderId]['reason'] = msg.whyHeld
self.orders[msg.orderId]['avgFillPrice'] = float(msg.avgFillPrice)
self.orders[msg.orderId]['parentId'] = int(msg.parentId)
self.orders[msg.orderId]['time'] = datetime.fromtimestamp(int(self.time))
# remove from orders?
# if msg.status.upper() == 'CANCELLED':
# del self.orders[msg.orderId]
# fire callback
if duplicateMessage == False:
self.ibCallback(caller="handleOrders", msg=msg)
# group orders by symbol
self.symbol_orders = self.group_orders("symbol")
# ---------------------------------------------------------
def group_orders(self, by="symbol"):
orders = {}
for orderId in self.orders:
order = self.orders[orderId]
if order[by] not in orders.keys():
orders[order[by]] = {}
try: del order["contract"]
except: pass
orders[order[by]][order['id']] = order
return orders
# ---------------------------------------------------------
# Start price handlers
# ---------------------------------------------------------
def handleMarketDepth(self, msg):
"""
https://www.interactivebrokers.com/en/software/api/apiguide/java/updatemktdepth.htm
https://www.interactivebrokers.com/en/software/api/apiguide/java/updatemktdepthl2.htm
"""
# make sure symbol exists
if msg.tickerId not in self.marketDepthData.keys():
self.marketDepthData[msg.tickerId] = self.marketDepthData[0].copy()
# bid
if msg.side == 1:
self.marketDepthData[msg.tickerId].loc[msg.position, "bid"] = msg.price
self.marketDepthData[msg.tickerId].loc[msg.position, "bidsize"] = msg.size
# ask
elif msg.side == 0:
self.marketDepthData[msg.tickerId].loc[msg.position, "ask"] = msg.price
self.marketDepthData[msg.tickerId].loc[msg.position, "asksize"] = msg.size
"""
# bid/ask spread / vol diff
self.marketDepthData[msg.tickerId].loc[msg.position, "spread"] = \
self.marketDepthData[msg.tickerId].loc[msg.position, "ask"]-\
self.marketDepthData[msg.tickerId].loc[msg.position, "bid"]
self.marketDepthData[msg.tickerId].loc[msg.position, "spreadsize"] = \
self.marketDepthData[msg.tickerId].loc[msg.position, "asksize"]-\
self.marketDepthData[msg.tickerId].loc[msg.position, "bidsize"]
"""
self.ibCallback(caller="handleMarketDepth", msg=msg)
# ---------------------------------------------------------
def handleHistoricalData(self, msg):
# self.log(mode="debug", msg="[HISTORY]: " + str(msg))
print('.', end="",flush=True)
if msg.date[:8].lower() == 'finished':
# print(self.historicalData)
if self.csv_path != None:
for sym in self.historicalData:
# print("[HISTORY FINISHED]: " + str(sym.upper()))
# contractString = self.contractString(str(sym))
contractString = str(sym)
print("[HISTORY FINISHED]: " + contractString)
self.historicalData[sym].to_csv(
self.csv_path + contractString +'.csv'
);
print('.')
# fire callback
self.ibCallback(caller="handleHistoricalData", msg=msg, completed=True)
else:
# create tick holder for ticker
if len(msg.date) <= 8: # daily
ts = datetime.strptime(msg.date, dataTypes["DATE_FORMAT"])
ts = ts.strftime(dataTypes["DATE_FORMAT_HISTORY"])
else:
ts = datetime.fromtimestamp(int(msg.date))
ts = ts.strftime(dataTypes["DATE_TIME_FORMAT_LONG"])
hist_row = DataFrame(index=['datetime'], data={
"datetime":ts, "O":msg.open, "H":msg.high,
"L":msg.low, "C":msg.close, "V":msg.volume,
"OI":msg.count, "WAP": msg.WAP })
hist_row.set_index('datetime', inplace=True)
symbol = self.tickerSymbol(msg.reqId)
if symbol not in self.historicalData.keys():
self.historicalData[symbol] = hist_row
else:
self.historicalData[symbol] = self.historicalData[symbol].append(hist_row)
# fire callback
self.ibCallback(caller="handleHistoricalData", msg=msg, completed=False)
# ---------------------------------------------------------
def handleTickPrice(self, msg):
"""
holds latest tick bid/ask/last price
"""
# self.log(mode="debug", msg="[TICK PRICE]: " + dataTypes["PRICE_TICKS"][msg.field] + " - " + str(msg))
# return
# create tick holder for ticker
if msg.tickerId not in self.marketData.keys():
self.marketData[msg.tickerId] = self.marketData[0].copy()
# bid price
if msg.canAutoExecute == 1 and msg.field == dataTypes["FIELD_BID_PRICE"]:
self.marketData[msg.tickerId]['bid'] = float(msg.price)
# ask price
elif msg.canAutoExecute == 1 and msg.field == dataTypes["FIELD_ASK_PRICE"]:
self.marketData[msg.tickerId]['ask'] = float(msg.price)
# last price
elif msg.field == dataTypes["FIELD_LAST_PRICE"]:
self.marketData[msg.tickerId]['last'] = float(msg.price)
# fire callback
self.ibCallback(caller="handleTickPrice", msg=msg)
# ---------------------------------------------------------
def handleTickSize(self, msg):
"""
holds latest tick bid/ask/last size
"""
# create tick holder for ticker
if msg.tickerId not in self.marketData.keys():
self.marketData[msg.tickerId] = self.marketData[0].copy()
# bid size
if msg.field == dataTypes["FIELD_BID_SIZE"]:
self.marketData[msg.tickerId]['bidsize'] = int(msg.size)
# ask size
elif msg.field == dataTypes["FIELD_ASK_SIZE"]:
self.marketData[msg.tickerId]['asksize'] = int(msg.size)
# last size
elif msg.field == dataTypes["FIELD_LAST_SIZE"]:
self.marketData[msg.tickerId]['lastsize'] = int(msg.size)
# fire callback
self.ibCallback(caller="handleTickSize", msg=msg)
# ---------------------------------------------------------
def handleTickString(self, msg):
"""
holds latest tick bid/ask/last timestamp
"""
# create tick holder for ticker
if msg.tickerId not in self.marketData.keys():
self.marketData[msg.tickerId] = self.marketData[0].copy()
# update timestamp
if msg.tickType == dataTypes["FIELD_LAST_TIMESTAMP"]:
ts = datetime.fromtimestamp(int(msg.value)) \
.strftime(dataTypes["DATE_TIME_FORMAT_LONG_MILLISECS"])
self.marketData[msg.tickerId].index = [ts]
# self.log(mode="debug", msg="[TICK TS]: " + ts)
# handle trailing stop orders
self.triggerTrailingStops(msg.tickerId)
self.handleTrailingStops(msg.tickerId)
# fire callback
self.ibCallback(caller="handleTickString", msg=msg)
elif (msg.tickType == dataTypes["FIELD_RTVOLUME"]):
# self.log(mode="info", msg="[RTVOL]: " + str(msg))
tick = dataTypes["RTVOL_TICKS"]
(tick['price'], tick['size'], tick['time'], tick['volume'],
tick['wap'], tick['single']) = msg.value.split(';')
try:
tick['last'] = float(tick['price'])
tick['lastsize'] = float(tick['size'])
tick['volume'] = float(tick['volume'])
tick['wap'] = float(tick['wap'])
tick['single'] = tick['single'] == 'true'
tick['instrument'] = self.tickerSymbol(msg.tickerId)
# parse time
s, ms = divmod(int(tick['time']), 1000)
tick['time'] = '{}.{:03d}'.format(
time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(s)), ms)
# add most recent bid/ask to "tick"
tick['bid'] = round(self.marketData[msg.tickerId]['bid'][0], 2)
tick['bidsize'] = round(self.marketData[msg.tickerId]['bidsize'][0], 2)
tick['ask'] = round(self.marketData[msg.tickerId]['ask'][0], 2)
tick['asksize'] = round(self.marketData[msg.tickerId]['asksize'][0], 2)
# self.log(mode="debug", msg=tick['time'] + ':' + self.tickerSymbol(msg.tickerId) + "-" + str(tick))
# fire callback
self.ibCallback(caller="handleTickString", msg=msg, tick=tick)
except:
pass
else:
# self.log(mode="info", msg="tickString" + "-" + msg)
# fire callback
self.ibCallback(caller="handleTickString", msg=msg)
# print(msg)
# ---------------------------------------------------------
def handleTickOptionComputation(self, msg):
"""
holds latest option data timestamp
only option price is kept at the moment
https://www.interactivebrokers.com/en/software/api/apiguide/java/tickoptioncomputation.htm
"""
# create tick holder for ticker
if msg.tickerId not in self.marketData.keys():
self.marketData[msg.tickerId] = self.marketData[0].copy()
# bid
if msg.field == dataTypes["FIELD_BID_OPTION_COMPUTATION"]:
self.marketData[msg.tickerId]['bid'] = float(msg.optPrice)
# ask
elif msg.field == dataTypes["FIELD_ASK_OPTION_COMPUTATION"]:
self.marketData[msg.tickerId]['ask'] = float(msg.optPrice)
# last
elif msg.field == dataTypes["FIELD_LAST_OPTION_COMPUTATION"]:
self.marketData[msg.tickerId]['last'] = float(msg.optPrice)
# print(msg)
# fire callback
self.ibCallback(caller="handleTickOptionComputation", msg=msg)
# ---------------------------------------------------------
# trailing stops
# ---------------------------------------------------------
def createTriggerableTrailingStop(self, symbol, quantity=1, \
triggerPrice=0, trailPercent=100., trailAmount=0., \
parentId=0, stopOrderId=None, ticksize=0.01):
""" adds order to triggerable list """
self.triggerableTrailingStops[symbol] = {
"parentId": parentId,
"stopOrderId": stopOrderId,
"triggerPrice": triggerPrice,
"trailAmount": abs(trailAmount),
"trailPercent": abs(trailPercent),
"quantity": quantity,
"ticksize": ticksize
}
return self.triggerableTrailingStops[symbol]
# ---------------------------------------------------------
def registerTrailingStop(self, tickerId, orderId=0, quantity=1, \
lastPrice=0, trailPercent=100., trailAmount=0., parentId=0, ticksize=0.01):
""" adds trailing stop to monitor list """
trailingStop = self.trailingStops[tickerId] = {
"orderId": orderId,
"parentId": parentId,
"lastPrice": lastPrice,
"trailAmount": trailAmount,
"trailPercent": trailPercent,
"quantity": quantity,
"ticksize": ticksize
}
return trailingStop
# ---------------------------------------------------------
def modifyStopOrder(self, orderId, parentId, newStop, quantity):
""" modify stop order """
if orderId in self.orders.keys():
order = self.createStopOrder(
quantity = quantity,
parentId = parentId,
stop = newStop,
trail = False,
transmit = True
)
return self.placeOrder(self.orders[orderId]['contract'], order, orderId)
return None
# ---------------------------------------------------------
def handleTrailingStops(self, tickerId):
""" software-based trailing stop """
# existing?
if tickerId not in self.trailingStops.keys():
return None
# continue
trailingStop = self.trailingStops[tickerId]
price = self.marketData[tickerId]['last'][0]
symbol = self.tickerSymbol(tickerId)
# contract = self.contracts[tickerId]
# contractString = self.contractString(contract)
# filled / no positions?
if (self.positions[symbol] == 0) | \
(self.orders[trailingStop['orderId']]['status'] == "FILLED"):
del self.trailingStops[tickerId]
return None
# continue...
newStop = trailingStop['lastPrice']
ticksize = trailingStop['ticksize']
# long
if (trailingStop['quantity'] < 0) & (trailingStop['lastPrice'] < price):
if abs(trailingStop['trailAmount']) > 0:
newStop = price - abs(trailingStop['trailAmount'])
elif trailingStop['trailPercent'] > 0:
newStop = price - (price*(abs(trailingStop['trailPercent'])/100))
# short
elif (trailingStop['quantity'] > 0) & (trailingStop['lastPrice'] > price):
if abs(trailingStop['trailAmount']) > 0:
newStop = price + abs(trailingStop['trailAmount'])
elif trailingStop['trailPercent'] > 0:
newStop = price + (price*(abs(trailingStop['trailPercent'])/100))
# valid newStop
newStop = self.roundClosestValid(newStop, ticksize)
print("\n\n", trailingStop['lastPrice'], newStop, price, "\n\n")
# no change?
if newStop == trailingStop['lastPrice']:
return None
# submit order
trailingStopOrderId = self.modifyStopOrder(
orderId = trailingStop['orderId'],
parentId = trailingStop['parentId'],
newStop = newStop,
quantity = trailingStop['quantity']
)
if trailingStopOrderId:
self.trailingStops[tickerId]['lastPrice'] = price
return trailingStopOrderId
# ---------------------------------------------------------
def triggerTrailingStops(self, tickerId):
""" trigger waiting trailing stops """
# print('.')
# test
symbol = self.tickerSymbol(tickerId)
price = self.marketData[tickerId]['last'][0]
# contract = self.contracts[tickerId]
if symbol in self.triggerableTrailingStops.keys():
pendingOrder = self.triggerableTrailingStops[symbol]
parentId = pendingOrder["parentId"]
stopOrderId = pendingOrder["stopOrderId"]
triggerPrice = pendingOrder["triggerPrice"]
trailAmount = pendingOrder["trailAmount"]
trailPercent = pendingOrder["trailPercent"]
quantity = pendingOrder["quantity"]
ticksize = pendingOrder["ticksize"]
# print(">>>>>>>", pendingOrder)
# print(">>>>>>>", parentId)
# print(">>>>>>>", self.orders)
# abort
if parentId not in self.orders.keys():
# print("DELETING")
del self.triggerableTrailingStops[symbol]
return None
else:
if self.orders[parentId]["status"] != "FILLED":
return None
# print("\n\n", quantity, triggerPrice, price, "\n\n")
# create the order
if ((quantity > 0) & (triggerPrice >= price)) | ((quantity < 0) & (triggerPrice <= price)) :
newStop = price
if trailAmount > 0:
if quantity > 0:
newStop += trailAmount
else:
newStop -= trailAmount
elif trailPercent > 0:
if quantity > 0:
newStop += price*(trailPercent/100)
else:
newStop -= price*(trailPercent/100)
else:
del self.triggerableTrailingStops[symbol]
return 0
# print("------", stopOrderId , parentId, newStop , quantity, "------")
# use valid newStop
newStop = self.roundClosestValid(newStop, ticksize)
trailingStopOrderId = self.modifyStopOrder(
orderId = stopOrderId,
parentId = parentId,
newStop = newStop,
quantity = quantity
)
if trailingStopOrderId:
# print(">>> TRAILING STOP")
del self.triggerableTrailingStops[symbol]
# register trailing stop
tickerId = self.tickerId(symbol)
self.registerTrailingStop(
tickerId = tickerId,
parentId = parentId,
orderId = stopOrderId,
lastPrice = price,
trailAmount = trailAmount,
trailPercent = trailPercent,
quantity = quantity,
ticksize = ticksize
)
return trailingStopOrderId
return None
# ---------------------------------------------------------
# tickerId/Symbols constructors
# ---------------------------------------------------------
def tickerId(self, symbol):
"""
returns the tickerId for the symbol or
sets one if it doesn't exits
"""
for tickerId in self.tickerIds:
if symbol == self.tickerIds[tickerId]:
return tickerId
break
else:
tickerId = len(self.tickerIds)
self.tickerIds[tickerId] = symbol
return tickerId
# ---------------------------------------------------------
def tickerSymbol(self, tickerId):
""" returns the symbol of a tickerId """
try:
return self.tickerIds[tickerId]
except:
return ""
# ---------------------------------------------------------
def contractString(self, contract, seperator="_"):
""" returns string from contract tuple """
localSymbol = ""
contractTuple = contract
if type(contract) != tuple:
localSymbol = contract.m_localSymbol
contractTuple = (contract.m_symbol, contract.m_secType,
contract.m_exchange, contract.m_currency, contract.m_expiry,
contract.m_strike, contract.m_right)
# build identifier
try:
if contractTuple[1] in ("OPT", "FOP"):
# contractString = (contractTuple[0], contractTuple[1], contractTuple[6], contractTuple[4], contractTuple[5])
if contractTuple[5]*100 - int(contractTuple[5]*100):
strike = contractTuple[5]
else:
strike = "{0:.2f}".format(contractTuple[5])
contractString = (contractTuple[0] + str(contractTuple[4]) + \
contractTuple[6], str(strike).replace(".", ""))
elif contractTuple[1] == "FUT":
# round expiry day to expiry month
if localSymbol != "":
exp = localSymbol[2:-1]+str(contractTuple[4][:4])
else:
exp = str(contractTuple[4])[:6]
exp = dataTypes["MONTH_CODES"][int(exp[4:6])] + str(int(exp[:4]))
contractString = (contractTuple[0] + exp, contractTuple[1])
elif contractTuple[1] == "CASH":
contractString = (contractTuple[0]+contractTuple[3], contractTuple[1])
else: # STK
contractString = (contractTuple[0], contractTuple[1])
# construct string
contractString = seperator.join(
str(v) for v in contractString).replace(seperator+"STK", "")
except:
contractString = contractTuple[0]
return contractString
# ---------------------------------------------------------
# contract constructors
# ---------------------------------------------------------
def createContract(self, contractTuple, **kwargs):
# https://www.interactivebrokers.com/en/software/api/apiguide/java/contract.htm
contractString = self.contractString(contractTuple)
# print(contractString)
# get (or set if not set) the tickerId for this symbol
# tickerId = self.tickerId(contractTuple[0])
tickerId = self.tickerId(contractString)
# construct contract
newContract = Contract()
newContract.m_symbol = contractTuple[0]
newContract.m_secType = contractTuple[1]
newContract.m_exchange = contractTuple[2]
newContract.m_currency = contractTuple[3]
newContract.m_expiry = contractTuple[4]
newContract.m_strike = contractTuple[5]
newContract.m_right = contractTuple[6]
# include expired (needed for historical data)
newContract.m_includeExpired = True
# add contract to pull
# self.contracts[contractTuple[0]] = newContract
self.contracts[tickerId] = newContract
# print(vars(newContract))
# print('Contract Values:%s,%s,%s,%s,%s,%s,%s:' % contractTuple)
return newContract
# shortcuts
# ---------------------------------------------------------
def createStockContract(self, symbol, currency="USD", exchange="SMART"):
contract_tuple = (symbol, "STK", exchange, currency, "", 0.0, "")
contract = self.createContract(contract_tuple)
return contract
# ---------------------------------------------------------
def createFuturesContract(self, symbol, currency="USD", expiry=None, exchange="GLOBEX"):
contract_tuple = (symbol, "FUT", exchange, currency, expiry, 0.0, "")
contract = self.createContract(contract_tuple)
return contract
def createFutureContract(self, symbol, currency="USD", expiry=None, exchange="GLOBEX"):
return self.createFuturesContract(symbol=symbol, currency=currency, expiry=expiry, exchange=exchange)
# ---------------------------------------------------------
def createOptionContract(self, symbol, secType="OPT", \
currency="USD", expiry=None, strike=0.0, otype="CALL", exchange="SMART"):
# secType = OPT (Option) / FOP (Options on Futures)
contract_tuple = (symbol, secType, exchange, currency, expiry, float(strike), otype)
contract = self.createContract(contract_tuple)
return contract
# ---------------------------------------------------------
def createCashContract(self, symbol, currency="USD", exchange="IDEALPRO"):
""" Used for FX, etc:
createCashContract("EUR", currency="USD")
"""
contract_tuple = (symbol, "CASH", exchange, currency, "", 0.0, "")
contract = self.createContract(contract_tuple)
return contract
# ---------------------------------------------------------
# order constructors
# ---------------------------------------------------------
def createOrder(self, quantity, price=0., stop=0., tif="DAY", \
fillorkill=False, iceberg=False, transmit=True, rth=False, **kwargs):
# https://www.interactivebrokers.com/en/software/api/apiguide/java/order.htm
order = Order()
order.m_clientId = self.clientId
order.m_action = dataTypes["ORDER_ACTION_BUY"] if quantity>0 else dataTypes["ORDER_ACTION_SELL"]
order.m_totalQuantity = abs(quantity)
if "orderType" in kwargs:
order.m_orderType = kwargs["orderType"]
else:
order.m_orderType = dataTypes["ORDER_TYPE_MARKET"] if price==0 else dataTypes["ORDER_TYPE_LIMIT"]
order.m_lmtPrice = price # LMT Price
order.m_auxPrice = stop # STOP Price
order.m_tif = tif # DAY, GTC, IOC, GTD
order.m_allOrNone = int(fillorkill)
order.hidden = iceberg
order.m_transmit = int(transmit)
order.m_outsideRth = int(rth==False)
# The publicly disclosed order size for Iceberg orders
if iceberg & ("blockOrder" in kwargs):
order.m_blockOrder = kwargs["m_blockOrder"]
# The percent offset amount for relative orders.
if "percentOffset" in kwargs:
order.m_percentOffset = kwargs["percentOffset"]
# The order ID of the parent order,
# used for bracket and auto trailing stop orders.
if "parentId" in kwargs:
order.m_parentId = kwargs["parentId"]
# oca group (Order Cancels All)
# used for bracket and auto trailing stop orders.
if "ocaGroup" in kwargs:
order.m_ocaGroup = kwargs["ocaGroup"]
if "ocaType" in kwargs:
order.m_ocaType = kwargs["ocaType"]
else:
order.m_ocaType = 2 # proportionately reduced size of remaining orders
# For TRAIL order
if "trailingPercent" in kwargs:
order.m_trailingPercent = kwargs["trailingPercent"]
# For TRAILLIMIT orders only
if "trailStopPrice" in kwargs:
order.m_trailStopPrice = kwargs["trailStopPrice"]
return order
# ---------------------------------------------------------
def createTargetOrder(self, quantity, parentId=0, \
target=0., orderType=None, transmit=True, group=None, rth=False):
""" Creates TARGET order """
order = self.createOrder(quantity,
price = target,
transmit = transmit,
orderType = dataTypes["ORDER_TYPE_LIMIT"] if orderType == None else orderType,
ocaGroup = group,
parentId = parentId,
rth = rth
)
return order
# ---------------------------------------------------------
def createStopOrder(self, quantity, parentId=0, \
stop=0., trail=False, transmit=True, group=None, rth=False):
""" Creates STOP order """
if trail:
order = self.createOrder(quantity,
trailingPercent = stop,
transmit = transmit,
orderType = dataTypes["ORDER_TYPE_STOP"],
ocaGroup = group,
parentId = parentId,
rth = rth
)
else:
order = self.createOrder(quantity,
stop = stop,
transmit = transmit,
orderType = dataTypes["ORDER_TYPE_STOP"],
ocaGroup = group,
parentId = parentId,
rth = rth
)
return order
# ---------------------------------------------------------
def createTrailingStopOrder(self, contract, quantity, \
parentId=0, trailPercent=100., group=None, triggerPrice=None):
""" convert hard stop order to trailing stop order """
if parentId not in self.orders:
raise ValueError("Order #"+ str(parentId) +" doesn't exist or wasn't submitted")
return
order = self.createStopOrder(quantity,
stop = trailPercent,
transmit = True,
trail = True,
# ocaGroup = group
parentId = parentId
)
self.requestOrderIds()
return self.placeOrder(contract, order, self.orderId+1)
# ---------------------------------------------------------
def createBracketOrder(self, \
contract, quantity, entry=0., target=0., stop=0., \
targetType=None, trailingStop=None, group=None, \
tif="DAY", fillorkill=False, iceberg=False, rth=False, **kwargs):
"""
creates One Cancels All Bracket Order
"""
if group == None:
group = "bracket_"+str(int(time.time()))
# main order
enteyOrder = self.createOrder(quantity, price=entry, transmit=False,
tif=tif, fillorkill=fillorkill, iceberg=iceberg, rth=rth)
entryOrderId = self.placeOrder(contract, enteyOrder)
# target
targetOrderId = 0
if target > 0:
targetOrder = self.createTargetOrder(-quantity,
parentId = entryOrderId,
target = target,
transmit = False if stop > 0 else True,
orderType = targetType,
group = group,
rth = rth
)
self.requestOrderIds()
targetOrderId = self.placeOrder(contract, targetOrder, self.orderId+1)
# stop
stopOrderId = 0
if stop > 0:
stopOrder = self.createStopOrder(-quantity,
parentId = entryOrderId,
stop = stop,
trail = trailingStop,
transmit = True,
group = group,
rth = rth
)
self.requestOrderIds()
stopOrderId = self.placeOrder(contract, stopOrder, self.orderId+2)
# triggered trailing stop?
# if ("triggerPrice" in kwargs) & ("trailPercent" in kwargs):
# self.pendingTriggeredTrailingStopOrders.append()
# self.signal_ttl = kwargs["signal_ttl"] if "signal_ttl" in kwargs else 0
return {
"group": group,
"entryOrderId": entryOrderId,
"targetOrderId": targetOrderId,
"stopOrderId": stopOrderId
}
# ---------------------------------------------------------
def placeOrder(self, contract, order, orderId=None):
""" Place order on IB TWS """
# get latest order id before submitting an order
self.requestOrderIds()
# continue...
useOrderId = self.orderId if orderId == None else orderId
self.ibConn.placeOrder(useOrderId, contract, order)
# update order id for next time
self.requestOrderIds()
return useOrderId
# ---------------------------------------------------------
def cancelOrder(self, orderId=None):
""" cancel order on IB TWS """
# get latest order id before submitting an order
self.requestOrderIds()
# continue...
useOrderId = self.orderId if orderId == None else orderId
self.ibConn.cancelOrder(useOrderId)
# update order id for next time
self.requestOrderIds()
return useOrderId
# ---------------------------------------------------------
# data requesters
# ---------------------------------------------------------
# https://github.com/blampe/IbPy/blob/master/demo/reference_python
# ---------------------------------------------------------
def requestOrderIds(self, numIds=1):
"""
Request the next valid ID that can be used when placing an order.
Triggers the nextValidId() event, and the id returned is that next valid ID.
# https://www.interactivebrokers.com/en/software/api/apiguide/java/reqids.htm
"""
self.ibConn.reqIds(numIds)
# ---------------------------------------------------------
def requestMarketDepth(self, contracts=None, num_rows=10):
"""
Register to streaming market data updates
https://www.interactivebrokers.com/en/software/api/apiguide/java/reqmktdepth.htm
"""
if num_rows > 10:
num_rows = 10
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.reqMktDepth(
tickerId, contract, num_rows)
# ---------------------------------------------------------
def cancelMarketDepth(self, contracts=None):
"""
Cancel streaming market data for contract
https://www.interactivebrokers.com/en/software/api/apiguide/java/cancelmktdepth.htm
"""
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.cancelMktDepth(tickerId=tickerId)
# ---------------------------------------------------------
def requestMarketData(self, contracts=None):
"""
Register to streaming market data updates
https://www.interactivebrokers.com/en/software/api/apiguide/java/reqmktdata.htm
"""
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
# tickerId = self.tickerId(contract.m_symbol)
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.reqMktData(
tickerId, contract, dataTypes["GENERIC_TICKS_RTVOLUME"], False)
# ---------------------------------------------------------
def cancelMarketData(self, contracts=None):
"""
Cancel streaming market data for contract
https://www.interactivebrokers.com/en/software/api/apiguide/java/cancelmktdata.htm
"""
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
# tickerId = self.tickerId(contract.m_symbol)
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.cancelMktData(tickerId=tickerId)
# ---------------------------------------------------------
def requestHistoricalData(self, contracts=None, resolution="1 min",
lookback="1 D", data="TRADES", end_datetime=None, rth=False, csv_path=None):
"""
Download to historical data
https://www.interactivebrokers.com/en/software/api/apiguide/java/reqhistoricaldata.htm
"""
self.csv_path = csv_path
if end_datetime == None:
end_datetime = time.strftime(dataTypes["DATE_TIME_FORMAT_HISTORY"])
if contracts == None:
contracts = list(self.contracts.values())
if not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
# tickerId = self.tickerId(contract.m_symbol)
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.reqHistoricalData(
tickerId = tickerId,
contract = contract,
endDateTime = end_datetime,
durationStr = lookback,
barSizeSetting = resolution,
whatToShow = data,
useRTH = int(rth),
formatDate = 2
)
def cancelHistoricalData(self, contracts=None):
""" cancel historical data stream """
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
# tickerId = self.tickerId(contract.m_symbol)
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.cancelHistoricalData(tickerId=tickerId)
# ---------------------------------------------------------
def requestPositionUpdates(self, subscribe=True):
""" Request/cancel request real-time position data for all accounts. """
if self.subscribePositions != subscribe:
self.subscribePositions = subscribe
if subscribe == True:
self.ibConn.reqPositions()
else:
self.ibConn.cancelPositions()
# ---------------------------------------------------------
def requestAccountUpdates(self, subscribe=True):
"""
Register to account updates
https://www.interactivebrokers.com/en/software/api/apiguide/java/reqaccountupdates.htm
"""
if self.subscribeAccount != subscribe:
self.subscribeAccount = subscribe
self.ibConn.reqAccountUpdates(subscribe, self.accountCode)
Keeps track of orderId between TWS sessions
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# ezIBpy: Pythonic Wrapper for IbPy
# https://github.com/ranaroussi/ezibpy
#
# Copyright 2015 Ran Aroussi
#
# Licensed under the GNU Lesser General Public License, v3.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.gnu.org/licenses/lgpl-3.0.en.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from datetime import datetime
from pandas import DataFrame, read_pickle
from ib.opt import Connection
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from ezibpy.utils import dataTypes
import atexit
import tempfile
import os
from stat import S_IWRITE
# =============================================================
# set debugging mode
# levels: DEBUG, INFO, WARNING, ERROR, CRITICAL
# filename=LOG_FILENAME
# =============================================================
import logging
# import sys
# logging.basicConfig(stream=sys.stdout, level=self.log(mode="debug", msg=
# format='%(asctime)s [%(levelname)s]: %(message)s')
class ezIBpy():
def log(self, mode, msg):
if self.logging:
if mode == "debug":
logging.debug(msg)
elif mode == "info":
logging.info(msg)
elif mode == "warning":
logging.warning(msg)
elif mode == "error":
logging.error(msg)
elif mode == "critical":
logging.critical(msg)
def roundClosestValid(self, val, res, decimals=2):
""" round to closest resolution """
return round(round(val / res)*res, decimals)
"""
https://www.interactivebrokers.com/en/software/api/apiguide/java/java_eclientsocket_methods.htm
"""
# ---------------------------------------------------------
def __init__(self):
self.__version__ = 0.09
self.logging = False
self.clientId = 1
self.port = 4001 # 7496/7497 = TWS, 4001 = IBGateway
self.host = "localhost"
self.ibConn = None
self.time = 0
self.commission = 0
self.accountCode = 0
self.orderId = 1
# auto-construct for every contract/order
self.tickerIds = { 0: "SYMBOL" }
self.contracts = {}
self.orders = {}
self.symbol_orders = {}
self.account = {}
self.positions = {}
self.portfolio = {}
# holds market data
tickDF = DataFrame({
"datetime":[0], "bid":[0], "bidsize":[0],
"ask":[0], "asksize":[0], "last":[0], "lastsize":[0]
})
tickDF.set_index('datetime', inplace=True)
self.marketData = { 0: tickDF } # idx = tickerId
# holds orderbook data
l2DF = DataFrame(index=range(5), data={
"bid":0, "bidsize":0,
"ask":0, "asksize":0
})
self.marketDepthData = { 0: l2DF } # idx = tickerId
# trailing stops
self.trailingStops = {}
# "tickerId" = {
# orderId: ...
# lastPrice: ...
# trailPercent: ...
# trailAmount: ...
# quantity: ...
# }
# triggerable trailing stops
self.triggerableTrailingStops = {}
# "tickerId" = {
# parentId: ...
# stopOrderId: ...
# triggerPrice: ...
# trailPercent: ...
# trailAmount: ...
# quantity: ...
# }
# @TODO - options data
# optionDF = DataFrame({ "datetime":[0], "bid":[0], "ask":[0], "last":[0], "impliedVol":[0], "delta":[0], "optPrice":[0], "pvDividend":[0], "gamma":[0], "vega":[0], "theta":[0], "undPrice":[0] })
# optionDF.set_index('datetime', inplace=True)
# optionDF = {
# "bid": DataFrame({ "impliedVol":[0], "delta":[0], "optPrice":[0], "pvDividend":[0], "gamma":[0], "vega":[0], "theta":[0], "undPrice":[0] }),
# "ask": DataFrame({ "impliedVol":[0], "delta":[0], "optPrice":[0], "pvDividend":[0], "gamma":[0], "vega":[0], "theta":[0], "undPrice":[0] }),
# "last": DataFrame({ "impliedVol":[0], "delta":[0], "optPrice":[0], "pvDividend":[0], "gamma":[0], "vega":[0], "theta":[0], "undPrice":[0] }),
# "model": DataFrame({ "impliedVol":[0], "delta":[0], "optPrice":[0], "pvDividend":[0], "gamma":[0], "vega":[0], "theta":[0], "undPrice":[0] })
# }
# self.optionsData = { 0: optionDF }
# historical data contrainer
self.historicalData = { } # idx = symbol
# ---------------------------------------------------------
def connect(self, clientId=0, host="localhost", port=4001):
""" Establish connection to TWS/IBGW """
self.clientId = clientId
self.host = host
self.port = port
self.ibConn = Connection.create(
host = self.host,
port = self.port,
clientId = self.clientId
)
# Assign error handling function.
self.ibConn.register(self.handleErrorEvents, 'Error')
# Assign server messages handling function.
self.ibConn.registerAll(self.handleServerEvents)
# connect
self.log(mode="info", msg="[CONNECTING TO IB]")
self.ibConn.connect()
# get server time
self.getServerTime()
# subscribe to position and account changes
self.subscribePositions = False
self.requestPositionUpdates(subscribe=True)
self.subscribeAccount = False
self.requestAccountUpdates(subscribe=True)
# force refresh of orderId upon connect
self.handleNextValidId(self.orderId)
# ---------------------------------------------------------
def disconnect(self):
""" Disconnect from TWS/IBGW """
if self.ibConn is not None:
self.log(mode="info", msg="[DISCONNECT TO IB]")
self.ibConn.disconnect()
# ---------------------------------------------------------
def getServerTime(self):
""" get the current time on IB """
self.ibConn.reqCurrentTime()
# ---------------------------------------------------------
# Start event handlers
# ---------------------------------------------------------
def handleErrorEvents(self, msg):
""" logs error messages """
# https://www.interactivebrokers.com/en/software/api/apiguide/tables/api_message_codes.htm
if msg.errorCode != -1: # and msg.errorCode != 2104 and msg.errorCode != 2106:
self.log(mode="error", msg=msg)
# ---------------------------------------------------------
def handleServerEvents(self, msg):
""" dispatch msg to the right handler """
if msg.typeName == "error":
self.log(mode="error", msg="[IB ERROR] "+str(msg))
elif msg.typeName == dataTypes["MSG_CURRENT_TIME"]:
if self.time < msg.time:
self.time = msg.time
elif (msg.typeName == dataTypes["MSG_TYPE_MKT_DEPTH"] or
msg.typeName == dataTypes["MSG_TYPE_MKT_DEPTH_L2"]):
self.handleMarketDepth(msg)
elif msg.typeName == dataTypes["MSG_TYPE_TICK_STRING"]:
self.handleTickString(msg)
elif msg.typeName == dataTypes["MSG_TYPE_TICK_PRICE"]:
self.handleTickPrice(msg)
elif msg.typeName == dataTypes["MSG_TYPE_TICK_SIZE"]:
self.handleTickSize(msg)
elif msg.typeName == dataTypes["MSG_TYPE_TICK_OPTION"]:
self.handleTickOptionComputation(msg)
elif (msg.typeName == dataTypes["MSG_TYPE_OPEN_ORDER"] or
msg.typeName == dataTypes["MSG_TYPE_ORDER_STATUS"]):
self.handleOrders(msg)
elif msg.typeName == dataTypes["MSG_TYPE_HISTORICAL_DATA"]:
self.handleHistoricalData(msg)
elif msg.typeName == dataTypes["MSG_TYPE_ACCOUNT_UPDATES"]:
self.handleAccount(msg)
elif msg.typeName == dataTypes["MSG_TYPE_PORTFOLIO_UPDATES"]:
self.handlePortfolio(msg)
elif msg.typeName == dataTypes["MSG_TYPE_POSITION"]:
self.handlePosition(msg)
elif msg.typeName == dataTypes["MSG_TYPE_NEXT_ORDER_ID"]:
self.handleNextValidId(msg.orderId)
elif msg.typeName == dataTypes["MSG_TYPE_MANAGED_ACCOUNTS"]:
self.accountCode = msg.accountsList
elif msg.typeName == dataTypes["MSG_COMMISSION_REPORT"]:
self.commission = msg.commissionReport.m_commission
else:
self.log(mode="info", msg="[SERVER]: "+ str(msg))
pass
# ---------------------------------------------------------
# generic callback function - can be used externally
# ---------------------------------------------------------
def ibCallback(self, caller, msg, **kwargs):
pass
# ---------------------------------------------------------
# Start admin handlers
# ---------------------------------------------------------
def handleNextValidId(self, orderId):
"""
handle nextValidId event
https://www.interactivebrokers.com/en/software/api/apiguide/java/nextvalidid.htm
"""
self.orderId = orderId
# cash last orderId
try:
# db file
dbfile = tempfile.gettempdir()+"/ezibpy.pkl"
lastOrderId = 1 # default
if os.path.exists(dbfile):
df = read_pickle(dbfile)
filtered = df[df['clientId']==self.clientId]
if len(filtered) > 0:
lastOrderId = filtered['orderId'].values[0]
print(">>", self.orderId, lastOrderId)
# override with db if needed
if self.orderId <= 1:
self.orderId = lastOrderId
elif self.orderId < lastOrderId:
self.orderId = lastOrderId
# make global
self.orderId = self.orderId
# save in db
orderDB = DataFrame(index=[0], data={'clientId':self.clientId, 'orderId':self.orderId})
if os.path.exists(dbfile):
orderDB = df[df['clientId']!=self.clientId].append(orderDB[['clientId', 'orderId']])
orderDB.to_pickle(dbfile)
# make writeable by all users
try: os.chmod(dbfile, S_IWRITE) # windows (cover all)
except: pass
try: os.chmod(dbfile, 0o777) # *nix
except: pass
except:
pass
# ---------------------------------------------------------
def handleAccount(self, msg):
"""
handle account info update
https://www.interactivebrokers.com/en/software/api/apiguide/java/updateaccountvalue.htm
"""
track = ["BuyingPower", "CashBalance", "DayTradesRemaining",
"NetLiquidation", "InitMarginReq", "MaintMarginReq",
"AvailableFunds", "AvailableFunds-C", "AvailableFunds-S"]
if msg.key in track:
# self.log(mode="info", msg="[ACCOUNT]: " + str(msg))
self.account[msg.key] = float(msg.value)
# fire callback
self.ibCallback(caller="handleAccount", msg=msg)
# ---------------------------------------------------------
def handlePosition(self, msg):
""" handle positions changes """
# contract identifier
contractString = self.contractString(msg.contract)
# if msg.pos != 0 or contractString in self.contracts.keys():
self.log(mode="info", msg="[POSITION]: " + str(msg))
self.positions[contractString] = {
"symbol": contractString,
"position": int(msg.pos),
"avgCost": float(msg.avgCost),
"account": msg.account
}
# fire callback
self.ibCallback(caller="handlePosition", msg=msg)
# ---------------------------------------------------------
def handlePortfolio(self, msg):
""" handle portfolio updates """
self.log(mode="info", msg="[PORTFOLIO]: " + str(msg))
# contract identifier
contractString = self.contractString(msg.contract)
self.portfolio[contractString] = {
"symbol": contractString,
"position": int(msg.position),
"marketPrice": float(msg.marketPrice),
"marketValue": float(msg.marketValue),
"averageCost": float(msg.averageCost),
"unrealizedPNL": float(msg.unrealizedPNL),
"realizedPNL": float(msg.realizedPNL),
"account": msg.accountName
}
# fire callback
self.ibCallback(caller="handlePortfolio", msg=msg)
# ---------------------------------------------------------
def handleOrders(self, msg):
""" handle order open & status """
"""
It is possible that orderStatus() may return duplicate messages.
It is essential that you filter the message accordingly.
"""
self.log(mode="info", msg="[ORDER]: " + str(msg))
# get server time
self.getServerTime()
time.sleep(0.001)
# we need to handle mutiple events for the same order status
duplicateMessage = False;
# open order
if msg.typeName == dataTypes["MSG_TYPE_OPEN_ORDER"]:
# contract identifier
contractString = self.contractString(msg.contract)
if msg.orderId in self.orders:
duplicateMessage = True
else:
self.orders[msg.orderId] = {
"id": msg.orderId,
"symbol": contractString,
"contract": msg.contract,
"status": "OPENED",
"reason": None,
"avgFillPrice": 0.,
"parentId": 0,
"time": datetime.fromtimestamp(int(self.time))
}
# order status
elif msg.typeName == dataTypes["MSG_TYPE_ORDER_STATUS"]:
if msg.orderId in self.orders and self.orders[msg.orderId]['status'] == msg.status.upper():
duplicateMessage = True
else:
self.orders[msg.orderId]['status'] = msg.status.upper()
self.orders[msg.orderId]['reason'] = msg.whyHeld
self.orders[msg.orderId]['avgFillPrice'] = float(msg.avgFillPrice)
self.orders[msg.orderId]['parentId'] = int(msg.parentId)
self.orders[msg.orderId]['time'] = datetime.fromtimestamp(int(self.time))
# remove from orders?
# if msg.status.upper() == 'CANCELLED':
# del self.orders[msg.orderId]
# fire callback
if duplicateMessage == False:
self.ibCallback(caller="handleOrders", msg=msg)
# group orders by symbol
self.symbol_orders = self.group_orders("symbol")
# ---------------------------------------------------------
def group_orders(self, by="symbol"):
orders = {}
for orderId in self.orders:
order = self.orders[orderId]
if order[by] not in orders.keys():
orders[order[by]] = {}
try: del order["contract"]
except: pass
orders[order[by]][order['id']] = order
return orders
# ---------------------------------------------------------
# Start price handlers
# ---------------------------------------------------------
def handleMarketDepth(self, msg):
"""
https://www.interactivebrokers.com/en/software/api/apiguide/java/updatemktdepth.htm
https://www.interactivebrokers.com/en/software/api/apiguide/java/updatemktdepthl2.htm
"""
# make sure symbol exists
if msg.tickerId not in self.marketDepthData.keys():
self.marketDepthData[msg.tickerId] = self.marketDepthData[0].copy()
# bid
if msg.side == 1:
self.marketDepthData[msg.tickerId].loc[msg.position, "bid"] = msg.price
self.marketDepthData[msg.tickerId].loc[msg.position, "bidsize"] = msg.size
# ask
elif msg.side == 0:
self.marketDepthData[msg.tickerId].loc[msg.position, "ask"] = msg.price
self.marketDepthData[msg.tickerId].loc[msg.position, "asksize"] = msg.size
"""
# bid/ask spread / vol diff
self.marketDepthData[msg.tickerId].loc[msg.position, "spread"] = \
self.marketDepthData[msg.tickerId].loc[msg.position, "ask"]-\
self.marketDepthData[msg.tickerId].loc[msg.position, "bid"]
self.marketDepthData[msg.tickerId].loc[msg.position, "spreadsize"] = \
self.marketDepthData[msg.tickerId].loc[msg.position, "asksize"]-\
self.marketDepthData[msg.tickerId].loc[msg.position, "bidsize"]
"""
self.ibCallback(caller="handleMarketDepth", msg=msg)
# ---------------------------------------------------------
def handleHistoricalData(self, msg):
# self.log(mode="debug", msg="[HISTORY]: " + str(msg))
print('.', end="",flush=True)
if msg.date[:8].lower() == 'finished':
# print(self.historicalData)
if self.csv_path != None:
for sym in self.historicalData:
# print("[HISTORY FINISHED]: " + str(sym.upper()))
# contractString = self.contractString(str(sym))
contractString = str(sym)
print("[HISTORY FINISHED]: " + contractString)
self.historicalData[sym].to_csv(
self.csv_path + contractString +'.csv'
);
print('.')
# fire callback
self.ibCallback(caller="handleHistoricalData", msg=msg, completed=True)
else:
# create tick holder for ticker
if len(msg.date) <= 8: # daily
ts = datetime.strptime(msg.date, dataTypes["DATE_FORMAT"])
ts = ts.strftime(dataTypes["DATE_FORMAT_HISTORY"])
else:
ts = datetime.fromtimestamp(int(msg.date))
ts = ts.strftime(dataTypes["DATE_TIME_FORMAT_LONG"])
hist_row = DataFrame(index=['datetime'], data={
"datetime":ts, "O":msg.open, "H":msg.high,
"L":msg.low, "C":msg.close, "V":msg.volume,
"OI":msg.count, "WAP": msg.WAP })
hist_row.set_index('datetime', inplace=True)
symbol = self.tickerSymbol(msg.reqId)
if symbol not in self.historicalData.keys():
self.historicalData[symbol] = hist_row
else:
self.historicalData[symbol] = self.historicalData[symbol].append(hist_row)
# fire callback
self.ibCallback(caller="handleHistoricalData", msg=msg, completed=False)
# ---------------------------------------------------------
def handleTickPrice(self, msg):
"""
holds latest tick bid/ask/last price
"""
# self.log(mode="debug", msg="[TICK PRICE]: " + dataTypes["PRICE_TICKS"][msg.field] + " - " + str(msg))
# return
# create tick holder for ticker
if msg.tickerId not in self.marketData.keys():
self.marketData[msg.tickerId] = self.marketData[0].copy()
# bid price
if msg.canAutoExecute == 1 and msg.field == dataTypes["FIELD_BID_PRICE"]:
self.marketData[msg.tickerId]['bid'] = float(msg.price)
# ask price
elif msg.canAutoExecute == 1 and msg.field == dataTypes["FIELD_ASK_PRICE"]:
self.marketData[msg.tickerId]['ask'] = float(msg.price)
# last price
elif msg.field == dataTypes["FIELD_LAST_PRICE"]:
self.marketData[msg.tickerId]['last'] = float(msg.price)
# fire callback
self.ibCallback(caller="handleTickPrice", msg=msg)
# ---------------------------------------------------------
def handleTickSize(self, msg):
"""
holds latest tick bid/ask/last size
"""
# create tick holder for ticker
if msg.tickerId not in self.marketData.keys():
self.marketData[msg.tickerId] = self.marketData[0].copy()
# bid size
if msg.field == dataTypes["FIELD_BID_SIZE"]:
self.marketData[msg.tickerId]['bidsize'] = int(msg.size)
# ask size
elif msg.field == dataTypes["FIELD_ASK_SIZE"]:
self.marketData[msg.tickerId]['asksize'] = int(msg.size)
# last size
elif msg.field == dataTypes["FIELD_LAST_SIZE"]:
self.marketData[msg.tickerId]['lastsize'] = int(msg.size)
# fire callback
self.ibCallback(caller="handleTickSize", msg=msg)
# ---------------------------------------------------------
def handleTickString(self, msg):
"""
holds latest tick bid/ask/last timestamp
"""
# create tick holder for ticker
if msg.tickerId not in self.marketData.keys():
self.marketData[msg.tickerId] = self.marketData[0].copy()
# update timestamp
if msg.tickType == dataTypes["FIELD_LAST_TIMESTAMP"]:
ts = datetime.fromtimestamp(int(msg.value)) \
.strftime(dataTypes["DATE_TIME_FORMAT_LONG_MILLISECS"])
self.marketData[msg.tickerId].index = [ts]
# self.log(mode="debug", msg="[TICK TS]: " + ts)
# handle trailing stop orders
self.triggerTrailingStops(msg.tickerId)
self.handleTrailingStops(msg.tickerId)
# fire callback
self.ibCallback(caller="handleTickString", msg=msg)
elif (msg.tickType == dataTypes["FIELD_RTVOLUME"]):
# self.log(mode="info", msg="[RTVOL]: " + str(msg))
tick = dataTypes["RTVOL_TICKS"]
(tick['price'], tick['size'], tick['time'], tick['volume'],
tick['wap'], tick['single']) = msg.value.split(';')
try:
tick['last'] = float(tick['price'])
tick['lastsize'] = float(tick['size'])
tick['volume'] = float(tick['volume'])
tick['wap'] = float(tick['wap'])
tick['single'] = tick['single'] == 'true'
tick['instrument'] = self.tickerSymbol(msg.tickerId)
# parse time
s, ms = divmod(int(tick['time']), 1000)
tick['time'] = '{}.{:03d}'.format(
time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(s)), ms)
# add most recent bid/ask to "tick"
tick['bid'] = round(self.marketData[msg.tickerId]['bid'][0], 2)
tick['bidsize'] = round(self.marketData[msg.tickerId]['bidsize'][0], 2)
tick['ask'] = round(self.marketData[msg.tickerId]['ask'][0], 2)
tick['asksize'] = round(self.marketData[msg.tickerId]['asksize'][0], 2)
# self.log(mode="debug", msg=tick['time'] + ':' + self.tickerSymbol(msg.tickerId) + "-" + str(tick))
# fire callback
self.ibCallback(caller="handleTickString", msg=msg, tick=tick)
except:
pass
else:
# self.log(mode="info", msg="tickString" + "-" + msg)
# fire callback
self.ibCallback(caller="handleTickString", msg=msg)
# print(msg)
# ---------------------------------------------------------
def handleTickOptionComputation(self, msg):
"""
holds latest option data timestamp
only option price is kept at the moment
https://www.interactivebrokers.com/en/software/api/apiguide/java/tickoptioncomputation.htm
"""
# create tick holder for ticker
if msg.tickerId not in self.marketData.keys():
self.marketData[msg.tickerId] = self.marketData[0].copy()
# bid
if msg.field == dataTypes["FIELD_BID_OPTION_COMPUTATION"]:
self.marketData[msg.tickerId]['bid'] = float(msg.optPrice)
# ask
elif msg.field == dataTypes["FIELD_ASK_OPTION_COMPUTATION"]:
self.marketData[msg.tickerId]['ask'] = float(msg.optPrice)
# last
elif msg.field == dataTypes["FIELD_LAST_OPTION_COMPUTATION"]:
self.marketData[msg.tickerId]['last'] = float(msg.optPrice)
# print(msg)
# fire callback
self.ibCallback(caller="handleTickOptionComputation", msg=msg)
# ---------------------------------------------------------
# trailing stops
# ---------------------------------------------------------
def createTriggerableTrailingStop(self, symbol, quantity=1, \
triggerPrice=0, trailPercent=100., trailAmount=0., \
parentId=0, stopOrderId=None, ticksize=0.01):
""" adds order to triggerable list """
self.triggerableTrailingStops[symbol] = {
"parentId": parentId,
"stopOrderId": stopOrderId,
"triggerPrice": triggerPrice,
"trailAmount": abs(trailAmount),
"trailPercent": abs(trailPercent),
"quantity": quantity,
"ticksize": ticksize
}
return self.triggerableTrailingStops[symbol]
# ---------------------------------------------------------
def registerTrailingStop(self, tickerId, orderId=0, quantity=1, \
lastPrice=0, trailPercent=100., trailAmount=0., parentId=0, ticksize=0.01):
""" adds trailing stop to monitor list """
trailingStop = self.trailingStops[tickerId] = {
"orderId": orderId,
"parentId": parentId,
"lastPrice": lastPrice,
"trailAmount": trailAmount,
"trailPercent": trailPercent,
"quantity": quantity,
"ticksize": ticksize
}
return trailingStop
# ---------------------------------------------------------
def modifyStopOrder(self, orderId, parentId, newStop, quantity):
""" modify stop order """
if orderId in self.orders.keys():
order = self.createStopOrder(
quantity = quantity,
parentId = parentId,
stop = newStop,
trail = False,
transmit = True
)
return self.placeOrder(self.orders[orderId]['contract'], order, orderId)
return None
# ---------------------------------------------------------
def handleTrailingStops(self, tickerId):
""" software-based trailing stop """
# existing?
if tickerId not in self.trailingStops.keys():
return None
# continue
trailingStop = self.trailingStops[tickerId]
price = self.marketData[tickerId]['last'][0]
symbol = self.tickerSymbol(tickerId)
# contract = self.contracts[tickerId]
# contractString = self.contractString(contract)
# filled / no positions?
if (self.positions[symbol] == 0) | \
(self.orders[trailingStop['orderId']]['status'] == "FILLED"):
del self.trailingStops[tickerId]
return None
# continue...
newStop = trailingStop['lastPrice']
ticksize = trailingStop['ticksize']
# long
if (trailingStop['quantity'] < 0) & (trailingStop['lastPrice'] < price):
if abs(trailingStop['trailAmount']) > 0:
newStop = price - abs(trailingStop['trailAmount'])
elif trailingStop['trailPercent'] > 0:
newStop = price - (price*(abs(trailingStop['trailPercent'])/100))
# short
elif (trailingStop['quantity'] > 0) & (trailingStop['lastPrice'] > price):
if abs(trailingStop['trailAmount']) > 0:
newStop = price + abs(trailingStop['trailAmount'])
elif trailingStop['trailPercent'] > 0:
newStop = price + (price*(abs(trailingStop['trailPercent'])/100))
# valid newStop
newStop = self.roundClosestValid(newStop, ticksize)
print("\n\n", trailingStop['lastPrice'], newStop, price, "\n\n")
# no change?
if newStop == trailingStop['lastPrice']:
return None
# submit order
trailingStopOrderId = self.modifyStopOrder(
orderId = trailingStop['orderId'],
parentId = trailingStop['parentId'],
newStop = newStop,
quantity = trailingStop['quantity']
)
if trailingStopOrderId:
self.trailingStops[tickerId]['lastPrice'] = price
return trailingStopOrderId
# ---------------------------------------------------------
def triggerTrailingStops(self, tickerId):
""" trigger waiting trailing stops """
# print('.')
# test
symbol = self.tickerSymbol(tickerId)
price = self.marketData[tickerId]['last'][0]
# contract = self.contracts[tickerId]
if symbol in self.triggerableTrailingStops.keys():
pendingOrder = self.triggerableTrailingStops[symbol]
parentId = pendingOrder["parentId"]
stopOrderId = pendingOrder["stopOrderId"]
triggerPrice = pendingOrder["triggerPrice"]
trailAmount = pendingOrder["trailAmount"]
trailPercent = pendingOrder["trailPercent"]
quantity = pendingOrder["quantity"]
ticksize = pendingOrder["ticksize"]
# print(">>>>>>>", pendingOrder)
# print(">>>>>>>", parentId)
# print(">>>>>>>", self.orders)
# abort
if parentId not in self.orders.keys():
# print("DELETING")
del self.triggerableTrailingStops[symbol]
return None
else:
if self.orders[parentId]["status"] != "FILLED":
return None
# print("\n\n", quantity, triggerPrice, price, "\n\n")
# create the order
if ((quantity > 0) & (triggerPrice >= price)) | ((quantity < 0) & (triggerPrice <= price)) :
newStop = price
if trailAmount > 0:
if quantity > 0:
newStop += trailAmount
else:
newStop -= trailAmount
elif trailPercent > 0:
if quantity > 0:
newStop += price*(trailPercent/100)
else:
newStop -= price*(trailPercent/100)
else:
del self.triggerableTrailingStops[symbol]
return 0
# print("------", stopOrderId , parentId, newStop , quantity, "------")
# use valid newStop
newStop = self.roundClosestValid(newStop, ticksize)
trailingStopOrderId = self.modifyStopOrder(
orderId = stopOrderId,
parentId = parentId,
newStop = newStop,
quantity = quantity
)
if trailingStopOrderId:
# print(">>> TRAILING STOP")
del self.triggerableTrailingStops[symbol]
# register trailing stop
tickerId = self.tickerId(symbol)
self.registerTrailingStop(
tickerId = tickerId,
parentId = parentId,
orderId = stopOrderId,
lastPrice = price,
trailAmount = trailAmount,
trailPercent = trailPercent,
quantity = quantity,
ticksize = ticksize
)
return trailingStopOrderId
return None
# ---------------------------------------------------------
# tickerId/Symbols constructors
# ---------------------------------------------------------
def tickerId(self, symbol):
"""
returns the tickerId for the symbol or
sets one if it doesn't exits
"""
for tickerId in self.tickerIds:
if symbol == self.tickerIds[tickerId]:
return tickerId
break
else:
tickerId = len(self.tickerIds)
self.tickerIds[tickerId] = symbol
return tickerId
# ---------------------------------------------------------
def tickerSymbol(self, tickerId):
""" returns the symbol of a tickerId """
try:
return self.tickerIds[tickerId]
except:
return ""
# ---------------------------------------------------------
def contractString(self, contract, seperator="_"):
""" returns string from contract tuple """
localSymbol = ""
contractTuple = contract
if type(contract) != tuple:
localSymbol = contract.m_localSymbol
contractTuple = (contract.m_symbol, contract.m_secType,
contract.m_exchange, contract.m_currency, contract.m_expiry,
contract.m_strike, contract.m_right)
# build identifier
try:
if contractTuple[1] in ("OPT", "FOP"):
# contractString = (contractTuple[0], contractTuple[1], contractTuple[6], contractTuple[4], contractTuple[5])
if contractTuple[5]*100 - int(contractTuple[5]*100):
strike = contractTuple[5]
else:
strike = "{0:.2f}".format(contractTuple[5])
contractString = (contractTuple[0] + str(contractTuple[4]) + \
contractTuple[6], str(strike).replace(".", ""))
elif contractTuple[1] == "FUT":
# round expiry day to expiry month
if localSymbol != "":
exp = localSymbol[2:-1]+str(contractTuple[4][:4])
else:
exp = str(contractTuple[4])[:6]
exp = dataTypes["MONTH_CODES"][int(exp[4:6])] + str(int(exp[:4]))
contractString = (contractTuple[0] + exp, contractTuple[1])
elif contractTuple[1] == "CASH":
contractString = (contractTuple[0]+contractTuple[3], contractTuple[1])
else: # STK
contractString = (contractTuple[0], contractTuple[1])
# construct string
contractString = seperator.join(
str(v) for v in contractString).replace(seperator+"STK", "")
except:
contractString = contractTuple[0]
return contractString
# ---------------------------------------------------------
# contract constructors
# ---------------------------------------------------------
def createContract(self, contractTuple, **kwargs):
# https://www.interactivebrokers.com/en/software/api/apiguide/java/contract.htm
contractString = self.contractString(contractTuple)
# print(contractString)
# get (or set if not set) the tickerId for this symbol
# tickerId = self.tickerId(contractTuple[0])
tickerId = self.tickerId(contractString)
# construct contract
newContract = Contract()
newContract.m_symbol = contractTuple[0]
newContract.m_secType = contractTuple[1]
newContract.m_exchange = contractTuple[2]
newContract.m_currency = contractTuple[3]
newContract.m_expiry = contractTuple[4]
newContract.m_strike = contractTuple[5]
newContract.m_right = contractTuple[6]
# include expired (needed for historical data)
newContract.m_includeExpired = True
# add contract to pull
# self.contracts[contractTuple[0]] = newContract
self.contracts[tickerId] = newContract
# print(vars(newContract))
# print('Contract Values:%s,%s,%s,%s,%s,%s,%s:' % contractTuple)
return newContract
# shortcuts
# ---------------------------------------------------------
def createStockContract(self, symbol, currency="USD", exchange="SMART"):
contract_tuple = (symbol, "STK", exchange, currency, "", 0.0, "")
contract = self.createContract(contract_tuple)
return contract
# ---------------------------------------------------------
def createFuturesContract(self, symbol, currency="USD", expiry=None, exchange="GLOBEX"):
contract_tuple = (symbol, "FUT", exchange, currency, expiry, 0.0, "")
contract = self.createContract(contract_tuple)
return contract
def createFutureContract(self, symbol, currency="USD", expiry=None, exchange="GLOBEX"):
return self.createFuturesContract(symbol=symbol, currency=currency, expiry=expiry, exchange=exchange)
# ---------------------------------------------------------
def createOptionContract(self, symbol, secType="OPT", \
currency="USD", expiry=None, strike=0.0, otype="CALL", exchange="SMART"):
# secType = OPT (Option) / FOP (Options on Futures)
contract_tuple = (symbol, secType, exchange, currency, expiry, float(strike), otype)
contract = self.createContract(contract_tuple)
return contract
# ---------------------------------------------------------
def createCashContract(self, symbol, currency="USD", exchange="IDEALPRO"):
""" Used for FX, etc:
createCashContract("EUR", currency="USD")
"""
contract_tuple = (symbol, "CASH", exchange, currency, "", 0.0, "")
contract = self.createContract(contract_tuple)
return contract
# ---------------------------------------------------------
# order constructors
# ---------------------------------------------------------
def createOrder(self, quantity, price=0., stop=0., tif="DAY", \
fillorkill=False, iceberg=False, transmit=True, rth=False, **kwargs):
# https://www.interactivebrokers.com/en/software/api/apiguide/java/order.htm
order = Order()
order.m_clientId = self.clientId
order.m_action = dataTypes["ORDER_ACTION_BUY"] if quantity>0 else dataTypes["ORDER_ACTION_SELL"]
order.m_totalQuantity = abs(quantity)
if "orderType" in kwargs:
order.m_orderType = kwargs["orderType"]
else:
order.m_orderType = dataTypes["ORDER_TYPE_MARKET"] if price==0 else dataTypes["ORDER_TYPE_LIMIT"]
order.m_lmtPrice = price # LMT Price
order.m_auxPrice = stop # STOP Price
order.m_tif = tif # DAY, GTC, IOC, GTD
order.m_allOrNone = int(fillorkill)
order.hidden = iceberg
order.m_transmit = int(transmit)
order.m_outsideRth = int(rth==False)
# The publicly disclosed order size for Iceberg orders
if iceberg & ("blockOrder" in kwargs):
order.m_blockOrder = kwargs["m_blockOrder"]
# The percent offset amount for relative orders.
if "percentOffset" in kwargs:
order.m_percentOffset = kwargs["percentOffset"]
# The order ID of the parent order,
# used for bracket and auto trailing stop orders.
if "parentId" in kwargs:
order.m_parentId = kwargs["parentId"]
# oca group (Order Cancels All)
# used for bracket and auto trailing stop orders.
if "ocaGroup" in kwargs:
order.m_ocaGroup = kwargs["ocaGroup"]
if "ocaType" in kwargs:
order.m_ocaType = kwargs["ocaType"]
else:
order.m_ocaType = 2 # proportionately reduced size of remaining orders
# For TRAIL order
if "trailingPercent" in kwargs:
order.m_trailingPercent = kwargs["trailingPercent"]
# For TRAILLIMIT orders only
if "trailStopPrice" in kwargs:
order.m_trailStopPrice = kwargs["trailStopPrice"]
return order
# ---------------------------------------------------------
def createTargetOrder(self, quantity, parentId=0, \
target=0., orderType=None, transmit=True, group=None, rth=False):
""" Creates TARGET order """
order = self.createOrder(quantity,
price = target,
transmit = transmit,
orderType = dataTypes["ORDER_TYPE_LIMIT"] if orderType == None else orderType,
ocaGroup = group,
parentId = parentId,
rth = rth
)
return order
# ---------------------------------------------------------
def createStopOrder(self, quantity, parentId=0, \
stop=0., trail=False, transmit=True, group=None, rth=False):
""" Creates STOP order """
if trail:
order = self.createOrder(quantity,
trailingPercent = stop,
transmit = transmit,
orderType = dataTypes["ORDER_TYPE_STOP"],
ocaGroup = group,
parentId = parentId,
rth = rth
)
else:
order = self.createOrder(quantity,
stop = stop,
transmit = transmit,
orderType = dataTypes["ORDER_TYPE_STOP"],
ocaGroup = group,
parentId = parentId,
rth = rth
)
return order
# ---------------------------------------------------------
def createTrailingStopOrder(self, contract, quantity, \
parentId=0, trailPercent=100., group=None, triggerPrice=None):
""" convert hard stop order to trailing stop order """
if parentId not in self.orders:
raise ValueError("Order #"+ str(parentId) +" doesn't exist or wasn't submitted")
return
order = self.createStopOrder(quantity,
stop = trailPercent,
transmit = True,
trail = True,
# ocaGroup = group
parentId = parentId
)
self.requestOrderIds()
return self.placeOrder(contract, order, self.orderId+1)
# ---------------------------------------------------------
def createBracketOrder(self, \
contract, quantity, entry=0., target=0., stop=0., \
targetType=None, trailingStop=None, group=None, \
tif="DAY", fillorkill=False, iceberg=False, rth=False, **kwargs):
"""
creates One Cancels All Bracket Order
"""
if group == None:
group = "bracket_"+str(int(time.time()))
# main order
enteyOrder = self.createOrder(quantity, price=entry, transmit=False,
tif=tif, fillorkill=fillorkill, iceberg=iceberg, rth=rth)
entryOrderId = self.placeOrder(contract, enteyOrder)
# target
targetOrderId = 0
if target > 0:
targetOrder = self.createTargetOrder(-quantity,
parentId = entryOrderId,
target = target,
transmit = False if stop > 0 else True,
orderType = targetType,
group = group,
rth = rth
)
self.requestOrderIds()
targetOrderId = self.placeOrder(contract, targetOrder, self.orderId+1)
# stop
stopOrderId = 0
if stop > 0:
stopOrder = self.createStopOrder(-quantity,
parentId = entryOrderId,
stop = stop,
trail = trailingStop,
transmit = True,
group = group,
rth = rth
)
self.requestOrderIds()
stopOrderId = self.placeOrder(contract, stopOrder, self.orderId+2)
# triggered trailing stop?
# if ("triggerPrice" in kwargs) & ("trailPercent" in kwargs):
# self.pendingTriggeredTrailingStopOrders.append()
# self.signal_ttl = kwargs["signal_ttl"] if "signal_ttl" in kwargs else 0
return {
"group": group,
"entryOrderId": entryOrderId,
"targetOrderId": targetOrderId,
"stopOrderId": stopOrderId
}
# ---------------------------------------------------------
def placeOrder(self, contract, order, orderId=None):
""" Place order on IB TWS """
# get latest order id before submitting an order
self.requestOrderIds()
# continue...
useOrderId = self.orderId if orderId == None else orderId
self.ibConn.placeOrder(useOrderId, contract, order)
# update order id for next time
self.requestOrderIds()
return useOrderId
# ---------------------------------------------------------
def cancelOrder(self, orderId=None):
""" cancel order on IB TWS """
# get latest order id before submitting an order
self.requestOrderIds()
# continue...
useOrderId = self.orderId if orderId == None else orderId
self.ibConn.cancelOrder(useOrderId)
# update order id for next time
self.requestOrderIds()
return useOrderId
# ---------------------------------------------------------
# data requesters
# ---------------------------------------------------------
# https://github.com/blampe/IbPy/blob/master/demo/reference_python
# ---------------------------------------------------------
def requestOrderIds(self, numIds=1):
"""
Request the next valid ID that can be used when placing an order.
Triggers the nextValidId() event, and the id returned is that next valid ID.
# https://www.interactivebrokers.com/en/software/api/apiguide/java/reqids.htm
"""
self.ibConn.reqIds(numIds)
# ---------------------------------------------------------
def requestMarketDepth(self, contracts=None, num_rows=10):
"""
Register to streaming market data updates
https://www.interactivebrokers.com/en/software/api/apiguide/java/reqmktdepth.htm
"""
if num_rows > 10:
num_rows = 10
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.reqMktDepth(
tickerId, contract, num_rows)
# ---------------------------------------------------------
def cancelMarketDepth(self, contracts=None):
"""
Cancel streaming market data for contract
https://www.interactivebrokers.com/en/software/api/apiguide/java/cancelmktdepth.htm
"""
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.cancelMktDepth(tickerId=tickerId)
# ---------------------------------------------------------
def requestMarketData(self, contracts=None):
"""
Register to streaming market data updates
https://www.interactivebrokers.com/en/software/api/apiguide/java/reqmktdata.htm
"""
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
# tickerId = self.tickerId(contract.m_symbol)
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.reqMktData(
tickerId, contract, dataTypes["GENERIC_TICKS_RTVOLUME"], False)
# ---------------------------------------------------------
def cancelMarketData(self, contracts=None):
"""
Cancel streaming market data for contract
https://www.interactivebrokers.com/en/software/api/apiguide/java/cancelmktdata.htm
"""
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
# tickerId = self.tickerId(contract.m_symbol)
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.cancelMktData(tickerId=tickerId)
# ---------------------------------------------------------
def requestHistoricalData(self, contracts=None, resolution="1 min",
lookback="1 D", data="TRADES", end_datetime=None, rth=False, csv_path=None):
"""
Download to historical data
https://www.interactivebrokers.com/en/software/api/apiguide/java/reqhistoricaldata.htm
"""
self.csv_path = csv_path
if end_datetime == None:
end_datetime = time.strftime(dataTypes["DATE_TIME_FORMAT_HISTORY"])
if contracts == None:
contracts = list(self.contracts.values())
if not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
# tickerId = self.tickerId(contract.m_symbol)
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.reqHistoricalData(
tickerId = tickerId,
contract = contract,
endDateTime = end_datetime,
durationStr = lookback,
barSizeSetting = resolution,
whatToShow = data,
useRTH = int(rth),
formatDate = 2
)
def cancelHistoricalData(self, contracts=None):
""" cancel historical data stream """
if contracts == None:
contracts = list(self.contracts.values())
elif not isinstance(contracts, list):
contracts = [contracts]
for contract in contracts:
# tickerId = self.tickerId(contract.m_symbol)
tickerId = self.tickerId(self.contractString(contract))
self.ibConn.cancelHistoricalData(tickerId=tickerId)
# ---------------------------------------------------------
def requestPositionUpdates(self, subscribe=True):
""" Request/cancel request real-time position data for all accounts. """
if self.subscribePositions != subscribe:
self.subscribePositions = subscribe
if subscribe == True:
self.ibConn.reqPositions()
else:
self.ibConn.cancelPositions()
# ---------------------------------------------------------
def requestAccountUpdates(self, subscribe=True):
"""
Register to account updates
https://www.interactivebrokers.com/en/software/api/apiguide/java/reqaccountupdates.htm
"""
if self.subscribeAccount != subscribe:
self.subscribeAccount = subscribe
self.ibConn.reqAccountUpdates(subscribe, self.accountCode)
|
""" Test functions for linalg.decomp module
"""
from __future__ import division, print_function, absolute_import
__usage__ = """
Build linalg:
python setup_linalg.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
"""
import itertools
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_equal,
assert_, assert_allclose)
import pytest
from pytest import raises as assert_raises
from scipy._lib.six import xrange
from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr,
schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq,
eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, qz, orth, ordqz,
subspace_angles, hadamard, eigvalsh_tridiagonal, eigh_tridiagonal,
null_space, cdf2rdf)
from scipy.linalg.lapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \
dsbev, dsbevd, dsbevx, zhbevd, zhbevx
from scipy.linalg.misc import norm
from scipy.linalg._decomp_qz import _select_function
from numpy import array, transpose, sometrue, diag, ones, linalg, \
argsort, zeros, arange, float32, complex64, dot, conj, identity, \
ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \
asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\
triu, tril
from numpy.random import normal, seed, random
from scipy.linalg._testutils import assert_no_overwrite
# digit precision to use in asserts for different types
DIGITS = {'d':11, 'D':11, 'f':4, 'F':4}
def clear_fuss(ar, fuss_binary_bits=7):
"""Clears trailing `fuss_binary_bits` of mantissa of a floating number"""
x = np.asanyarray(ar)
if np.iscomplexobj(x):
return clear_fuss(x.real) + 1j * clear_fuss(x.imag)
significant_binary_bits = np.finfo(x.dtype).nmant
x_mant, x_exp = np.frexp(x)
f = 2.0**(significant_binary_bits - fuss_binary_bits)
x_mant *= f
np.rint(x_mant, out=x_mant)
x_mant /= f
return np.ldexp(x_mant, x_exp)
# XXX: This function should be available through numpy.testing
def assert_dtype_equal(act, des):
if isinstance(act, ndarray):
act = act.dtype
else:
act = dtype(act)
if isinstance(des, ndarray):
des = des.dtype
else:
des = dtype(des)
assert_(act == des, 'dtype mismatch: "%s" (should be "%s") ' % (act, des))
# XXX: This function should not be defined here, but somewhere in
# scipy.linalg namespace
def symrand(dim_or_eigv):
"""Return a random symmetric (Hermitian) matrix.
If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues
uniformly distributed on (-1,1).
If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose
eigenvalues are 'a'.
"""
if isinstance(dim_or_eigv, int):
dim = dim_or_eigv
d = random(dim)*2 - 1
elif (isinstance(dim_or_eigv, ndarray) and
len(dim_or_eigv.shape) == 1):
dim = dim_or_eigv.shape[0]
d = dim_or_eigv
else:
raise TypeError("input type not supported.")
v = random_rot(dim)
h = dot(dot(v.T.conj(), diag(d)), v)
# to avoid roundoff errors, symmetrize the matrix (again)
h = 0.5*(h.T+h)
return h
# XXX: This function should not be defined here, but somewhere in
# scipy.linalg namespace
def random_rot(dim):
"""Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The algorithm is described in the paper
Stewart, G.W., 'The efficient generation of random orthogonal
matrices with an application to condition estimators', SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization"""
H = eye(dim)
D = ones((dim,))
for n in range(1, dim):
x = normal(size=(dim-n+1,))
D[n-1] = sign(x[0])
x[0] -= D[n-1]*sqrt((x*x).sum())
# Householder transformation
Hx = eye(dim-n+1) - 2.*outer(x, x)/(x*x).sum()
mat = eye(dim)
mat[n-1:,n-1:] = Hx
H = dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = -D.prod()
H = (D*H.T).T
return H
class TestEigVals(object):
def test_simple(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
w = eigvals(a)
exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
assert_array_almost_equal(w,exact_w)
def test_simple_tr(self):
a = array([[1,2,3],[1,2,3],[2,5,6]],'d')
a = transpose(a).copy()
a = transpose(a)
w = eigvals(a)
exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
assert_array_almost_equal(w,exact_w)
def test_simple_complex(self):
a = [[1,2,3],[1,2,3],[2,5,6+1j]]
w = eigvals(a)
exact_w = [(9+1j+sqrt(92+6j))/2,
0,
(9+1j-sqrt(92+6j))/2]
assert_array_almost_equal(w,exact_w)
def test_finite(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
w = eigvals(a, check_finite=False)
exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
assert_array_almost_equal(w,exact_w)
class TestEig(object):
def test_simple(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
w,v = eig(a)
exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
v0 = array([1,1,(1+sqrt(93)/3)/2])
v1 = array([3.,0,-1])
v2 = array([1,1,(1-sqrt(93)/3)/2])
v0 = v0 / sqrt(dot(v0,transpose(v0)))
v1 = v1 / sqrt(dot(v1,transpose(v1)))
v2 = v2 / sqrt(dot(v2,transpose(v2)))
assert_array_almost_equal(w,exact_w)
assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))
assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))
assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))
for i in range(3):
assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])
w,v = eig(a,left=1,right=0)
for i in range(3):
assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i])
def test_simple_complex_eig(self):
a = [[1,2],[-2,1]]
w,vl,vr = eig(a,left=1,right=1)
assert_array_almost_equal(w, array([1+2j, 1-2j]))
for i in range(2):
assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])
for i in range(2):
assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),
conjugate(w[i])*vl[:,i])
def test_simple_complex(self):
a = [[1,2,3],[1,2,3],[2,5,6+1j]]
w,vl,vr = eig(a,left=1,right=1)
for i in range(3):
assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])
for i in range(3):
assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),
conjugate(w[i])*vl[:,i])
def test_gh_3054(self):
a = [[1]]
b = [[0]]
w, vr = eig(a, b, homogeneous_eigvals=True)
assert_allclose(w[1,0], 0)
assert_(w[0,0] != 0)
assert_allclose(vr, 1)
w, vr = eig(a, b)
assert_equal(w, np.inf)
assert_allclose(vr, 1)
def _check_gen_eig(self, A, B):
if B is not None:
A, B = asarray(A), asarray(B)
B0 = B
else:
A = asarray(A)
B0 = B
B = np.eye(*A.shape)
msg = "\n%r\n%r" % (A, B)
# Eigenvalues in homogeneous coordinates
w, vr = eig(A, B0, homogeneous_eigvals=True)
wt = eigvals(A, B0, homogeneous_eigvals=True)
val1 = dot(A, vr) * w[1,:]
val2 = dot(B, vr) * w[0,:]
for i in range(val1.shape[1]):
assert_allclose(val1[:,i], val2[:,i], rtol=1e-13, atol=1e-13, err_msg=msg)
if B0 is None:
assert_allclose(w[1,:], 1)
assert_allclose(wt[1,:], 1)
perm = np.lexsort(w)
permt = np.lexsort(wt)
assert_allclose(w[:,perm], wt[:,permt], atol=1e-7, rtol=1e-7,
err_msg=msg)
length = np.empty(len(vr))
for i in xrange(len(vr)):
length[i] = norm(vr[:,i])
assert_allclose(length, np.ones(length.size), err_msg=msg,
atol=1e-7, rtol=1e-7)
# Convert homogeneous coordinates
beta_nonzero = (w[1,:] != 0)
wh = w[0,beta_nonzero] / w[1,beta_nonzero]
# Eigenvalues in standard coordinates
w, vr = eig(A, B0)
wt = eigvals(A, B0)
val1 = dot(A, vr)
val2 = dot(B, vr) * w
res = val1 - val2
for i in range(res.shape[1]):
if all(isfinite(res[:,i])):
assert_allclose(res[:,i], 0, rtol=1e-13, atol=1e-13, err_msg=msg)
w_fin = w[isfinite(w)]
wt_fin = wt[isfinite(wt)]
perm = argsort(clear_fuss(w_fin))
permt = argsort(clear_fuss(wt_fin))
assert_allclose(w[perm], wt[permt],
atol=1e-7, rtol=1e-7, err_msg=msg)
length = np.empty(len(vr))
for i in xrange(len(vr)):
length[i] = norm(vr[:,i])
assert_allclose(length, np.ones(length.size), err_msg=msg)
# Compare homogeneous and nonhomogeneous versions
assert_allclose(sort(wh), sort(w[np.isfinite(w)]))
@pytest.mark.xfail(reason="See gh-2254.")
def test_singular(self):
# Example taken from
# http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html
A = array(([22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34],
[27,31,26,21,15], [38,44,44,24,30]))
B = array(([13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25],
[16,25,27,14,23], [24,35,18,21,22]))
olderr = np.seterr(all='ignore')
try:
self._check_gen_eig(A, B)
finally:
np.seterr(**olderr)
def test_falker(self):
# Test matrices giving some Nan generalized eigenvalues.
M = diag(array(([1,0,3])))
K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2]))
D = array(([1,-1,0],[-1,1,0],[0,0,0]))
Z = zeros((3,3))
I3 = identity(3)
A = bmat([[I3, Z], [Z, -K]])
B = bmat([[Z, I3], [M, D]])
olderr = np.seterr(all='ignore')
try:
self._check_gen_eig(A, B)
finally:
np.seterr(**olderr)
def test_bad_geneig(self):
# Ticket #709 (strange return values from DGGEV)
def matrices(omega):
c1 = -9 + omega**2
c2 = 2*omega
A = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, c1, 0],
[0, 0, 0, c1]]
B = [[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 0, 0, -c2],
[0, 1, c2, 0]]
return A, B
# With a buggy LAPACK, this can fail for different omega on different
# machines -- so we need to test several values
olderr = np.seterr(all='ignore')
try:
for k in xrange(100):
A, B = matrices(omega=k*5./100)
self._check_gen_eig(A, B)
finally:
np.seterr(**olderr)
def test_make_eigvals(self):
# Step through all paths in _make_eigvals
seed(1234)
# Real eigenvalues
A = symrand(3)
self._check_gen_eig(A, None)
B = symrand(3)
self._check_gen_eig(A, B)
# Complex eigenvalues
A = random((3, 3)) + 1j*random((3, 3))
self._check_gen_eig(A, None)
B = random((3, 3)) + 1j*random((3, 3))
self._check_gen_eig(A, B)
def test_check_finite(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
w,v = eig(a, check_finite=False)
exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
v0 = array([1,1,(1+sqrt(93)/3)/2])
v1 = array([3.,0,-1])
v2 = array([1,1,(1-sqrt(93)/3)/2])
v0 = v0 / sqrt(dot(v0,transpose(v0)))
v1 = v1 / sqrt(dot(v1,transpose(v1)))
v2 = v2 / sqrt(dot(v2,transpose(v2)))
assert_array_almost_equal(w,exact_w)
assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))
assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))
assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))
for i in range(3):
assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])
def test_not_square_error(self):
"""Check that passing a non-square array raises a ValueError."""
A = np.arange(6).reshape(3,2)
assert_raises(ValueError, eig, A)
def test_shape_mismatch(self):
"""Check that passing arrays of with different shapes raises a ValueError."""
A = identity(2)
B = np.arange(9.0).reshape(3,3)
assert_raises(ValueError, eig, A, B)
assert_raises(ValueError, eig, B, A)
class TestEigBanded(object):
def setup_method(self):
self.create_bandmat()
def create_bandmat(self):
"""Create the full matrix `self.fullmat` and
the corresponding band matrix `self.bandmat`."""
N = 10
self.KL = 2 # number of subdiagonals (below the diagonal)
self.KU = 2 # number of superdiagonals (above the diagonal)
# symmetric band matrix
self.sym_mat = (diag(1.0*ones(N))
+ diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1)
+ diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
# hermitian band matrix
self.herm_mat = (diag(-1.0*ones(N))
+ 1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1)
+ diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
# general real band matrix
self.real_mat = (diag(1.0*ones(N))
+ diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 1)
+ diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
# general complex band matrix
self.comp_mat = (1j*diag(1.0*ones(N))
+ diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1)
+ diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
# Eigenvalues and -vectors from linalg.eig
ew, ev = linalg.eig(self.sym_mat)
ew = ew.real
args = argsort(ew)
self.w_sym_lin = ew[args]
self.evec_sym_lin = ev[:,args]
ew, ev = linalg.eig(self.herm_mat)
ew = ew.real
args = argsort(ew)
self.w_herm_lin = ew[args]
self.evec_herm_lin = ev[:,args]
# Extract upper bands from symmetric and hermitian band matrices
# (for use in dsbevd, dsbevx, zhbevd, zhbevx
# and their single precision versions)
LDAB = self.KU + 1
self.bandmat_sym = zeros((LDAB, N), dtype=float)
self.bandmat_herm = zeros((LDAB, N), dtype=complex)
for i in xrange(LDAB):
self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i)
self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i)
# Extract bands from general real and complex band matrix
# (for use in dgbtrf, dgbtrs and their single precision versions)
LDAB = 2*self.KL + self.KU + 1
self.bandmat_real = zeros((LDAB, N), dtype=float)
self.bandmat_real[2*self.KL,:] = diag(self.real_mat) # diagonal
for i in xrange(self.KL):
# superdiagonals
self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1)
# subdiagonals
self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1)
self.bandmat_comp = zeros((LDAB, N), dtype=complex)
self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat) # diagonal
for i in xrange(self.KL):
# superdiagonals
self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1)
# subdiagonals
self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1)
# absolute value for linear equation system A*x = b
self.b = 1.0*arange(N)
self.bc = self.b * (1 + 1j)
#####################################################################
def test_dsbev(self):
"""Compare dsbev eigenvalues and eigenvectors with
the result of linalg.eig."""
w, evec, info = dsbev(self.bandmat_sym, compute_v=1)
evec_ = evec[:,argsort(w)]
assert_array_almost_equal(sort(w), self.w_sym_lin)
assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
def test_dsbevd(self):
"""Compare dsbevd eigenvalues and eigenvectors with
the result of linalg.eig."""
w, evec, info = dsbevd(self.bandmat_sym, compute_v=1)
evec_ = evec[:,argsort(w)]
assert_array_almost_equal(sort(w), self.w_sym_lin)
assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
def test_dsbevx(self):
"""Compare dsbevx eigenvalues and eigenvectors
with the result of linalg.eig."""
N,N = shape(self.sym_mat)
## Achtung: Argumente 0.0,0.0,range?
w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N,
compute_v=1, range=2)
evec_ = evec[:,argsort(w)]
assert_array_almost_equal(sort(w), self.w_sym_lin)
assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
def test_zhbevd(self):
"""Compare zhbevd eigenvalues and eigenvectors
with the result of linalg.eig."""
w, evec, info = zhbevd(self.bandmat_herm, compute_v=1)
evec_ = evec[:,argsort(w)]
assert_array_almost_equal(sort(w), self.w_herm_lin)
assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))
def test_zhbevx(self):
"""Compare zhbevx eigenvalues and eigenvectors
with the result of linalg.eig."""
N,N = shape(self.herm_mat)
## Achtung: Argumente 0.0,0.0,range?
w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,
compute_v=1, range=2)
evec_ = evec[:,argsort(w)]
assert_array_almost_equal(sort(w), self.w_herm_lin)
assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))
def test_eigvals_banded(self):
"""Compare eigenvalues of eigvals_banded with those of linalg.eig."""
w_sym = eigvals_banded(self.bandmat_sym)
w_sym = w_sym.real
assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
w_herm = eigvals_banded(self.bandmat_herm)
w_herm = w_herm.real
assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
# extracting eigenvalues with respect to an index range
ind1 = 2
ind2 = 6
w_sym_ind = eigvals_banded(self.bandmat_sym,
select='i', select_range=(ind1, ind2))
assert_array_almost_equal(sort(w_sym_ind),
self.w_sym_lin[ind1:ind2+1])
w_herm_ind = eigvals_banded(self.bandmat_herm,
select='i', select_range=(ind1, ind2))
assert_array_almost_equal(sort(w_herm_ind),
self.w_herm_lin[ind1:ind2+1])
# extracting eigenvalues with respect to a value range
v_lower = self.w_sym_lin[ind1] - 1.0e-5
v_upper = self.w_sym_lin[ind2] + 1.0e-5
w_sym_val = eigvals_banded(self.bandmat_sym,
select='v', select_range=(v_lower, v_upper))
assert_array_almost_equal(sort(w_sym_val),
self.w_sym_lin[ind1:ind2+1])
v_lower = self.w_herm_lin[ind1] - 1.0e-5
v_upper = self.w_herm_lin[ind2] + 1.0e-5
w_herm_val = eigvals_banded(self.bandmat_herm,
select='v', select_range=(v_lower, v_upper))
assert_array_almost_equal(sort(w_herm_val),
self.w_herm_lin[ind1:ind2+1])
w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)
w_sym = w_sym.real
assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
def test_eig_banded(self):
"""Compare eigenvalues and eigenvectors of eig_banded
with those of linalg.eig. """
w_sym, evec_sym = eig_banded(self.bandmat_sym)
evec_sym_ = evec_sym[:,argsort(w_sym.real)]
assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))
w_herm, evec_herm = eig_banded(self.bandmat_herm)
evec_herm_ = evec_herm[:,argsort(w_herm.real)]
assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin))
# extracting eigenvalues with respect to an index range
ind1 = 2
ind2 = 6
w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym,
select='i', select_range=(ind1, ind2))
assert_array_almost_equal(sort(w_sym_ind),
self.w_sym_lin[ind1:ind2+1])
assert_array_almost_equal(abs(evec_sym_ind),
abs(self.evec_sym_lin[:,ind1:ind2+1]))
w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm,
select='i', select_range=(ind1, ind2))
assert_array_almost_equal(sort(w_herm_ind),
self.w_herm_lin[ind1:ind2+1])
assert_array_almost_equal(abs(evec_herm_ind),
abs(self.evec_herm_lin[:,ind1:ind2+1]))
# extracting eigenvalues with respect to a value range
v_lower = self.w_sym_lin[ind1] - 1.0e-5
v_upper = self.w_sym_lin[ind2] + 1.0e-5
w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym,
select='v', select_range=(v_lower, v_upper))
assert_array_almost_equal(sort(w_sym_val),
self.w_sym_lin[ind1:ind2+1])
assert_array_almost_equal(abs(evec_sym_val),
abs(self.evec_sym_lin[:,ind1:ind2+1]))
v_lower = self.w_herm_lin[ind1] - 1.0e-5
v_upper = self.w_herm_lin[ind2] + 1.0e-5
w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm,
select='v', select_range=(v_lower, v_upper))
assert_array_almost_equal(sort(w_herm_val),
self.w_herm_lin[ind1:ind2+1])
assert_array_almost_equal(abs(evec_herm_val),
abs(self.evec_herm_lin[:,ind1:ind2+1]))
w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False)
evec_sym_ = evec_sym[:,argsort(w_sym.real)]
assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))
def test_dgbtrf(self):
"""Compare dgbtrf LU factorisation with the LU factorisation result
of linalg.lu."""
M,N = shape(self.real_mat)
lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
# extract matrix u from lu_symm_band
u = diag(lu_symm_band[2*self.KL,:])
for i in xrange(self.KL + self.KU):
u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)
p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0)
assert_array_almost_equal(u, u_lin)
def test_zgbtrf(self):
"""Compare zgbtrf LU factorisation with the LU factorisation result
of linalg.lu."""
M,N = shape(self.comp_mat)
lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
# extract matrix u from lu_symm_band
u = diag(lu_symm_band[2*self.KL,:])
for i in xrange(self.KL + self.KU):
u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)
p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0)
assert_array_almost_equal(u, u_lin)
def test_dgbtrs(self):
"""Compare dgbtrs solutions for linear equation system A*x = b
with solutions of linalg.solve."""
lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv)
y_lin = linalg.solve(self.real_mat, self.b)
assert_array_almost_equal(y, y_lin)
def test_zgbtrs(self):
"""Compare zgbtrs solutions for linear equation system A*x = b
with solutions of linalg.solve."""
lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv)
y_lin = linalg.solve(self.comp_mat, self.bc)
assert_array_almost_equal(y, y_lin)
class TestEigTridiagonal(object):
def setup_method(self):
self.create_trimat()
def create_trimat(self):
"""Create the full matrix `self.fullmat`, `self.d`, and `self.e`."""
N = 10
# symmetric band matrix
self.d = 1.0*ones(N)
self.e = -1.0*ones(N-1)
self.full_mat = (diag(self.d) + diag(self.e, -1) + diag(self.e, 1))
ew, ev = linalg.eig(self.full_mat)
ew = ew.real
args = argsort(ew)
self.w = ew[args]
self.evec = ev[:, args]
def test_degenerate(self):
"""Test error conditions."""
# Wrong sizes
assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e[:-1])
# Must be real
assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e * 1j)
# Bad driver
assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e,
lapack_driver=1.)
assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
lapack_driver='foo')
# Bad bounds
assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
select='i', select_range=(0, -1))
def test_eigvalsh_tridiagonal(self):
"""Compare eigenvalues of eigvalsh_tridiagonal with those of eig."""
# can't use ?STERF with subselection
for driver in ('sterf', 'stev', 'stebz', 'stemr', 'auto'):
w = eigvalsh_tridiagonal(self.d, self.e, lapack_driver=driver)
assert_array_almost_equal(sort(w), self.w)
for driver in ('sterf', 'stev'):
assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
lapack_driver='stev', select='i',
select_range=(0, 1))
for driver in ('stebz', 'stemr', 'auto'):
# extracting eigenvalues with respect to the full index range
w_ind = eigvalsh_tridiagonal(
self.d, self.e, select='i', select_range=(0, len(self.d)-1),
lapack_driver=driver)
assert_array_almost_equal(sort(w_ind), self.w)
# extracting eigenvalues with respect to an index range
ind1 = 2
ind2 = 6
w_ind = eigvalsh_tridiagonal(
self.d, self.e, select='i', select_range=(ind1, ind2),
lapack_driver=driver)
assert_array_almost_equal(sort(w_ind), self.w[ind1:ind2+1])
# extracting eigenvalues with respect to a value range
v_lower = self.w[ind1] - 1.0e-5
v_upper = self.w[ind2] + 1.0e-5
w_val = eigvalsh_tridiagonal(
self.d, self.e, select='v', select_range=(v_lower, v_upper),
lapack_driver=driver)
assert_array_almost_equal(sort(w_val), self.w[ind1:ind2+1])
def test_eigh_tridiagonal(self):
"""Compare eigenvalues and eigenvectors of eigh_tridiagonal
with those of eig. """
# can't use ?STERF when eigenvectors are requested
assert_raises(ValueError, eigh_tridiagonal, self.d, self.e,
lapack_driver='sterf')
for driver in ('stebz', 'stev', 'stemr', 'auto'):
w, evec = eigh_tridiagonal(self.d, self.e, lapack_driver=driver)
evec_ = evec[:, argsort(w)]
assert_array_almost_equal(sort(w), self.w)
assert_array_almost_equal(abs(evec_), abs(self.evec))
assert_raises(ValueError, eigh_tridiagonal, self.d, self.e,
lapack_driver='stev', select='i', select_range=(0, 1))
for driver in ('stebz', 'stemr', 'auto'):
# extracting eigenvalues with respect to an index range
ind1 = 0
ind2 = len(self.d)-1
w, evec = eigh_tridiagonal(
self.d, self.e, select='i', select_range=(ind1, ind2),
lapack_driver=driver)
assert_array_almost_equal(sort(w), self.w)
assert_array_almost_equal(abs(evec), abs(self.evec))
ind1 = 2
ind2 = 6
w, evec = eigh_tridiagonal(
self.d, self.e, select='i', select_range=(ind1, ind2),
lapack_driver=driver)
assert_array_almost_equal(sort(w), self.w[ind1:ind2+1])
assert_array_almost_equal(abs(evec),
abs(self.evec[:, ind1:ind2+1]))
# extracting eigenvalues with respect to a value range
v_lower = self.w[ind1] - 1.0e-5
v_upper = self.w[ind2] + 1.0e-5
w, evec = eigh_tridiagonal(
self.d, self.e, select='v', select_range=(v_lower, v_upper),
lapack_driver=driver)
assert_array_almost_equal(sort(w), self.w[ind1:ind2+1])
assert_array_almost_equal(abs(evec),
abs(self.evec[:, ind1:ind2+1]))
def test_eigh():
DIM = 6
v = {'dim': (DIM,),
'dtype': ('f','d','F','D'),
'overwrite': (True, False),
'lower': (True, False),
'turbo': (True, False),
'eigvals': (None, (2, DIM-2))}
for dim in v['dim']:
for typ in v['dtype']:
for overwrite in v['overwrite']:
for turbo in v['turbo']:
for eigenvalues in v['eigvals']:
for lower in v['lower']:
eigenhproblem_standard(
'ordinary',
dim, typ, overwrite, lower,
turbo, eigenvalues)
eigenhproblem_general(
'general ',
dim, typ, overwrite, lower,
turbo, eigenvalues)
def test_eigh_of_sparse():
# This tests the rejection of inputs that eigh cannot currently handle.
import scipy.sparse
a = scipy.sparse.identity(2).tocsc()
b = np.atleast_2d(a)
assert_raises(ValueError, eigh, a)
assert_raises(ValueError, eigh, b)
def _complex_symrand(dim, dtype):
a1, a2 = symrand(dim), symrand(dim)
# add antisymmetric matrix as imag part
a = a1 + 1j*(triu(a2)-tril(a2))
return a.astype(dtype)
def eigenhproblem_standard(desc, dim, dtype,
overwrite, lower, turbo,
eigenvalues):
"""Solve a standard eigenvalue problem."""
if iscomplex(empty(1, dtype=dtype)):
a = _complex_symrand(dim, dtype)
else:
a = symrand(dim).astype(dtype)
if overwrite:
a_c = a.copy()
else:
a_c = a
w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigenvalues)
assert_dtype_equal(z.dtype, dtype)
w = w.astype(dtype)
diag_ = diag(dot(z.T.conj(), dot(a_c, z))).real
assert_array_almost_equal(diag_, w, DIGITS[dtype])
def eigenhproblem_general(desc, dim, dtype,
overwrite, lower, turbo,
eigenvalues):
"""Solve a generalized eigenvalue problem."""
if iscomplex(empty(1, dtype=dtype)):
a = _complex_symrand(dim, dtype)
b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype)
else:
a = symrand(dim).astype(dtype)
b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype)
if overwrite:
a_c, b_c = a.copy(), b.copy()
else:
a_c, b_c = a, b
w, z = eigh(a, b, overwrite_a=overwrite, lower=lower,
overwrite_b=overwrite, turbo=turbo, eigvals=eigenvalues)
assert_dtype_equal(z.dtype, dtype)
w = w.astype(dtype)
diag1_ = diag(dot(z.T.conj(), dot(a_c, z))).real
assert_array_almost_equal(diag1_, w, DIGITS[dtype])
diag2_ = diag(dot(z.T.conj(), dot(b_c, z))).real
assert_array_almost_equal(diag2_, ones(diag2_.shape[0]), DIGITS[dtype])
def test_eigh_integer():
a = array([[1,2],[2,7]])
b = array([[3,1],[1,5]])
w,z = eigh(a)
w,z = eigh(a,b)
class TestLU(object):
def setup_method(self):
self.a = array([[1,2,3],[1,2,3],[2,5,6]])
self.ca = array([[1,2,3],[1,2,3],[2,5j,6]])
# Those matrices are more robust to detect problems in permutation
# matrices than the ones above
self.b = array([[1,2,3],[4,5,6],[7,8,9]])
self.cb = array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]])
# Reectangular matrices
self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
self.chrect = 1.j * array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
self.cvrect = 1.j * array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
# Medium sizes matrices
self.med = random((30, 40))
self.cmed = random((30, 40)) + 1.j * random((30, 40))
def _test_common(self, data):
p,l,u = lu(data)
assert_array_almost_equal(dot(dot(p,l),u),data)
pl,u = lu(data,permute_l=1)
assert_array_almost_equal(dot(pl,u),data)
# Simple tests
def test_simple(self):
self._test_common(self.a)
def test_simple_complex(self):
self._test_common(self.ca)
def test_simple2(self):
self._test_common(self.b)
def test_simple2_complex(self):
self._test_common(self.cb)
# rectangular matrices tests
def test_hrectangular(self):
self._test_common(self.hrect)
def test_vrectangular(self):
self._test_common(self.vrect)
def test_hrectangular_complex(self):
self._test_common(self.chrect)
def test_vrectangular_complex(self):
self._test_common(self.cvrect)
# Bigger matrices
def test_medium1(self):
"""Check lu decomposition on medium size, rectangular matrix."""
self._test_common(self.med)
def test_medium1_complex(self):
"""Check lu decomposition on medium size, rectangular matrix."""
self._test_common(self.cmed)
def test_check_finite(self):
p, l, u = lu(self.a, check_finite=False)
assert_array_almost_equal(dot(dot(p,l),u), self.a)
def test_simple_known(self):
# Ticket #1458
for order in ['C', 'F']:
A = np.array([[2, 1],[0, 1.]], order=order)
LU, P = lu_factor(A)
assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]]))
assert_array_equal(P, np.array([0, 1]))
class TestLUSingle(TestLU):
"""LU testers for single precision, real and double"""
def setup_method(self):
TestLU.setup_method(self)
self.a = self.a.astype(float32)
self.ca = self.ca.astype(complex64)
self.b = self.b.astype(float32)
self.cb = self.cb.astype(complex64)
self.hrect = self.hrect.astype(float32)
self.chrect = self.hrect.astype(complex64)
self.vrect = self.vrect.astype(float32)
self.cvrect = self.vrect.astype(complex64)
self.med = self.vrect.astype(float32)
self.cmed = self.vrect.astype(complex64)
class TestLUSolve(object):
def setup_method(self):
seed(1234)
def test_lu(self):
a0 = random((10,10))
b = random((10,))
for order in ['C', 'F']:
a = np.array(a0, order=order)
x1 = solve(a,b)
lu_a = lu_factor(a)
x2 = lu_solve(lu_a,b)
assert_array_almost_equal(x1,x2)
def test_check_finite(self):
a = random((10,10))
b = random((10,))
x1 = solve(a,b)
lu_a = lu_factor(a, check_finite=False)
x2 = lu_solve(lu_a,b, check_finite=False)
assert_array_almost_equal(x1,x2)
class TestSVD_GESDD(object):
def setup_method(self):
self.lapack_driver = 'gesdd'
seed(1234)
def test_degenerate(self):
assert_raises(TypeError, svd, [[1.]], lapack_driver=1.)
assert_raises(ValueError, svd, [[1.]], lapack_driver='foo')
def test_simple(self):
a = [[1,2,3],[1,20,3],[2,5,6]]
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u),identity(3))
assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_simple_singular(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u),identity(3))
assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_simple_underdet(self):
a = [[1,2,3],[4,5,6]]
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[0]))
sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_simple_overdet(self):
a = [[1,2],[4,5],[3,4]]
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1]))
assert_array_almost_equal(dot(transpose(vh),vh),identity(2))
sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_random(self):
n = 20
m = 15
for i in range(3):
for a in [random([n,m]),random([m,n])]:
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1]))
assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0]))
sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_simple_complex(self):
a = [[1,2,3],[1,2j,3],[2,5,6]]
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))
assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0]))
sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_random_complex(self):
n = 20
m = 15
for i in range(3):
for full_matrices in (True, False):
for a in [random([n,m]),random([m,n])]:
a = a + 1j*random(list(a.shape))
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))
# This fails when [m,n]
# assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(len(vh),dtype=vh.dtype.char))
sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_crash_1580(self):
sizes = [(13, 23), (30, 50), (60, 100)]
np.random.seed(1234)
for sz in sizes:
for dt in [np.float32, np.float64, np.complex64, np.complex128]:
a = np.random.rand(*sz).astype(dt)
# should not crash
svd(a, lapack_driver=self.lapack_driver)
def test_check_finite(self):
a = [[1,2,3],[1,20,3],[2,5,6]]
u,s,vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u),identity(3))
assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_gh_5039(self):
# This is a smoke test for https://github.com/scipy/scipy/issues/5039
#
# The following is reported to raise "ValueError: On entry to DGESDD
# parameter number 12 had an illegal value".
# `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')`
# This is reported to only show up on LAPACK 3.0.3.
#
# The matrix below is taken from the call to
# `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest
b = np.array(
[[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.],
[0., 0.16666667, 0.66666667, 0.16666667, 0., 0.],
[0., 0., 0.16666667, 0.66666667, 0.16666667, 0.],
[0., 0., 0., 0.16666667, 0.66666667, 0.16666667]])
svd(b, lapack_driver=self.lapack_driver)
class TestSVD_GESVD(TestSVD_GESDD):
def setup_method(self):
self.lapack_driver = 'gesvd'
seed(1234)
class TestSVDVals(object):
def test_empty(self):
for a in [[]], np.empty((2, 0)), np.ones((0, 3)):
s = svdvals(a)
assert_equal(s, np.empty(0))
def test_simple(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
s = svdvals(a)
assert_(len(s) == 3)
assert_(s[0] >= s[1] >= s[2])
def test_simple_underdet(self):
a = [[1,2,3],[4,5,6]]
s = svdvals(a)
assert_(len(s) == 2)
assert_(s[0] >= s[1])
def test_simple_overdet(self):
a = [[1,2],[4,5],[3,4]]
s = svdvals(a)
assert_(len(s) == 2)
assert_(s[0] >= s[1])
def test_simple_complex(self):
a = [[1,2,3],[1,20,3j],[2,5,6]]
s = svdvals(a)
assert_(len(s) == 3)
assert_(s[0] >= s[1] >= s[2])
def test_simple_underdet_complex(self):
a = [[1,2,3],[4,5j,6]]
s = svdvals(a)
assert_(len(s) == 2)
assert_(s[0] >= s[1])
def test_simple_overdet_complex(self):
a = [[1,2],[4,5],[3j,4]]
s = svdvals(a)
assert_(len(s) == 2)
assert_(s[0] >= s[1])
def test_check_finite(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
s = svdvals(a, check_finite=False)
assert_(len(s) == 3)
assert_(s[0] >= s[1] >= s[2])
@pytest.mark.slow
def test_crash_2609(self):
np.random.seed(1234)
a = np.random.rand(1500, 2800)
# Shouldn't crash:
svdvals(a)
class TestDiagSVD(object):
def test_simple(self):
assert_array_almost_equal(diagsvd([1,0,0],3,3),[[1,0,0],[0,0,0],[0,0,0]])
class TestQR(object):
def setup_method(self):
seed(1234)
def test_simple(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_simple_left(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a)
c = [1, 2, 3]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
qc,r2 = qr_multiply(a, identity(3), "left")
assert_array_almost_equal(q, qc)
def test_simple_right(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a)
c = [1, 2, 3]
qc,r2 = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), qc)
assert_array_almost_equal(r, r2)
qc,r = qr_multiply(a, identity(3))
assert_array_almost_equal(q, qc)
def test_simple_pivoting(self):
a = np.asarray([[8,2,3],[2,9,3],[5,3,6]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_left_pivoting(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3]
qc,r,jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(dot(q, c), qc)
def test_simple_right_pivoting(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3]
qc,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), qc)
def test_simple_trap(self):
a = [[8,2,3],[2,9,3]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
def test_simple_trap_pivoting(self):
a = np.asarray([[8,2,3],[2,9,3]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_tall(self):
# full version
a = [[8,2],[2,9],[5,3]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_simple_tall_pivoting(self):
# full version pivoting
a = np.asarray([[8,2],[2,9],[5,3]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_tall_e(self):
# economy version
a = [[8,2],[2,9],[5,3]]
q,r = qr(a, mode='economic')
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (3,2))
assert_equal(r.shape, (2,2))
def test_simple_tall_e_pivoting(self):
# economy version pivoting
a = np.asarray([[8,2],[2,9],[5,3]])
q,r,p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p], mode='economic')
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_tall_left(self):
a = [[8,2],[2,9],[5,3]]
q,r = qr(a, mode="economic")
c = [1, 2]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
c = array([1,2,0])
qc,r2 = qr_multiply(a, c, "left", overwrite_c=True)
assert_array_almost_equal(dot(q, c[:2]), qc)
qc,r = qr_multiply(a, identity(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_tall_left_pivoting(self):
a = [[8,2],[2,9],[5,3]]
q,r,jpvt = qr(a, mode="economic", pivoting=True)
c = [1, 2]
qc,r,kpvt = qr_multiply(a, c, "left", True)
assert_array_equal(jpvt, kpvt)
assert_array_almost_equal(dot(q, c), qc)
qc,r,jpvt = qr_multiply(a, identity(2), "left", True)
assert_array_almost_equal(qc, q)
def test_simple_tall_right(self):
a = [[8,2],[2,9],[5,3]]
q,r = qr(a, mode="economic")
c = [1, 2, 3]
cq,r2 = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
assert_array_almost_equal(r, r2)
cq,r = qr_multiply(a, identity(3))
assert_array_almost_equal(cq, q)
def test_simple_tall_right_pivoting(self):
a = [[8,2],[2,9],[5,3]]
q,r,jpvt = qr(a, pivoting=True, mode="economic")
c = [1, 2, 3]
cq,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), cq)
cq,r,jpvt = qr_multiply(a, identity(3), pivoting=True)
assert_array_almost_equal(cq, q)
def test_simple_fat(self):
# full version
a = [[8,2,5],[2,9,3]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
def test_simple_fat_pivoting(self):
# full version pivoting
a = np.asarray([[8,2,5],[2,9,3]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_fat_e(self):
# economy version
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode='economic')
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
def test_simple_fat_e_pivoting(self):
# economy version pivoting
a = np.asarray([[8,2,3],[2,9,5]])
q,r,p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
q2,r2 = qr(a[:,p], mode='economic')
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_fat_left(self):
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode="economic")
c = [1, 2]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
qc,r = qr_multiply(a, identity(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_fat_left_pivoting(self):
a = [[8,2,3],[2,9,5]]
q,r,jpvt = qr(a, mode="economic", pivoting=True)
c = [1, 2]
qc,r,jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(dot(q, c), qc)
qc,r,jpvt = qr_multiply(a, identity(2), "left", True)
assert_array_almost_equal(qc, q)
def test_simple_fat_right(self):
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode="economic")
c = [1, 2]
cq,r2 = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
assert_array_almost_equal(r, r2)
cq,r = qr_multiply(a, identity(2))
assert_array_almost_equal(cq, q)
def test_simple_fat_right_pivoting(self):
a = [[8,2,3],[2,9,5]]
q,r,jpvt = qr(a, pivoting=True, mode="economic")
c = [1, 2]
cq,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), cq)
cq,r,jpvt = qr_multiply(a, identity(2), pivoting=True)
assert_array_almost_equal(cq, q)
def test_simple_complex(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_simple_complex_left(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(3), "left")
assert_array_almost_equal(q, qc)
def test_simple_complex_right(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), qc)
qc,r = qr_multiply(a, identity(3))
assert_array_almost_equal(q, qc)
def test_simple_tall_complex_left(self):
a = [[8,2+3j],[2,9],[5+7j,3]]
q,r = qr(a, mode="economic")
c = [1, 2+2j]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
c = array([1,2,0])
qc,r2 = qr_multiply(a, c, "left", overwrite_c=True)
assert_array_almost_equal(dot(q, c[:2]), qc)
qc,r = qr_multiply(a, identity(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_complex_left_conjugate(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c, "left", conjugate=True)
assert_array_almost_equal(dot(q.conjugate(), c), qc)
def test_simple_complex_tall_left_conjugate(self):
a = [[3,3+4j],[5,2+2j],[3,2]]
q,r = qr(a, mode='economic')
c = [1, 3+4j]
qc,r = qr_multiply(a, c, "left", conjugate=True)
assert_array_almost_equal(dot(q.conjugate(), c), qc)
def test_simple_complex_right_conjugate(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c, conjugate=True)
assert_array_almost_equal(dot(c, q.conjugate()), qc)
def test_simple_complex_pivoting(self):
a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_complex_left_pivoting(self):
a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3+4j]
qc,r,jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(dot(q, c), qc)
def test_simple_complex_right_pivoting(self):
a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3+4j]
qc,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), qc)
def test_random(self):
n = 20
for k in range(2):
a = random([n,n])
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a)
def test_random_left(self):
n = 20
for k in range(2):
a = random([n,n])
q,r = qr(a)
c = random([n])
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(n), "left")
assert_array_almost_equal(q, qc)
def test_random_right(self):
n = 20
for k in range(2):
a = random([n,n])
q,r = qr(a)
c = random([n])
cq,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
cq,r = qr_multiply(a, identity(n))
assert_array_almost_equal(q, cq)
def test_random_pivoting(self):
n = 20
for k in range(2):
a = random([n,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_tall(self):
# full version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a)
def test_random_tall_left(self):
# full version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a, mode="economic")
c = random([n])
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(n), "left")
assert_array_almost_equal(qc, q)
def test_random_tall_right(self):
# full version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a, mode="economic")
c = random([m])
cq,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
cq,r = qr_multiply(a, identity(m))
assert_array_almost_equal(cq, q)
def test_random_tall_pivoting(self):
# full version pivoting
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_tall_e(self):
# economy version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a, mode='economic')
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (m,n))
assert_equal(r.shape, (n,n))
def test_random_tall_e_pivoting(self):
# economy version pivoting
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r,p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a[:,p])
assert_equal(q.shape, (m,n))
assert_equal(r.shape, (n,n))
q2,r2 = qr(a[:,p], mode='economic')
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_trap(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a)
def test_random_trap_pivoting(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_complex(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r = qr(a)
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))
assert_array_almost_equal(dot(q,r),a)
def test_random_complex_left(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r = qr(a)
c = random([n])+1j*random([n])
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(n), "left")
assert_array_almost_equal(q, qc)
def test_random_complex_right(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r = qr(a)
c = random([n])+1j*random([n])
cq,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
cq,r = qr_multiply(a, identity(n))
assert_array_almost_equal(q, cq)
def test_random_complex_pivoting(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_check_finite(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a, check_finite=False)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_lwork(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
# Get comparison values
q,r = qr(a, lwork=None)
# Test against minimum valid lwork
q2,r2 = qr(a, lwork=3)
assert_array_almost_equal(q2,q)
assert_array_almost_equal(r2,r)
# Test against larger lwork
q3,r3 = qr(a, lwork=10)
assert_array_almost_equal(q3,q)
assert_array_almost_equal(r3,r)
# Test against explicit lwork=-1
q4,r4 = qr(a, lwork=-1)
assert_array_almost_equal(q4,q)
assert_array_almost_equal(r4,r)
# Test against invalid lwork
assert_raises(Exception, qr, (a,), {'lwork':0})
assert_raises(Exception, qr, (a,), {'lwork':2})
class TestRQ(object):
def setup_method(self):
seed(1234)
def test_simple(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
r,q = rq(a)
assert_array_almost_equal(dot(q, transpose(q)),identity(3))
assert_array_almost_equal(dot(r,q),a)
def test_r(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
r,q = rq(a)
r2 = rq(a, mode='r')
assert_array_almost_equal(r, r2)
def test_random(self):
n = 20
for k in range(2):
a = random([n,n])
r,q = rq(a)
assert_array_almost_equal(dot(q, transpose(q)),identity(n))
assert_array_almost_equal(dot(r,q),a)
def test_simple_trap(self):
a = [[8,2,3],[2,9,3]]
r,q = rq(a)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(r,q),a)
def test_simple_tall(self):
a = [[8,2],[2,9],[5,3]]
r,q = rq(a)
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(r,q),a)
def test_simple_fat(self):
a = [[8,2,5],[2,9,3]]
r,q = rq(a)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(r,q),a)
def test_simple_complex(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
r,q = rq(a)
assert_array_almost_equal(dot(q, conj(transpose(q))),identity(3))
assert_array_almost_equal(dot(r,q),a)
def test_random_tall(self):
m = 200
n = 100
for k in range(2):
a = random([m,n])
r,q = rq(a)
assert_array_almost_equal(dot(q, transpose(q)),identity(n))
assert_array_almost_equal(dot(r,q),a)
def test_random_trap(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])
r,q = rq(a)
assert_array_almost_equal(dot(q, transpose(q)),identity(n))
assert_array_almost_equal(dot(r,q),a)
def test_random_trap_economic(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])
r,q = rq(a, mode='economic')
assert_array_almost_equal(dot(q,transpose(q)),identity(m))
assert_array_almost_equal(dot(r,q),a)
assert_equal(q.shape, (m, n))
assert_equal(r.shape, (m, m))
def test_random_complex(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
r,q = rq(a)
assert_array_almost_equal(dot(q, conj(transpose(q))),identity(n))
assert_array_almost_equal(dot(r,q),a)
def test_random_complex_economic(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])+1j*random([m,n])
r,q = rq(a, mode='economic')
assert_array_almost_equal(dot(q,conj(transpose(q))),identity(m))
assert_array_almost_equal(dot(r,q),a)
assert_equal(q.shape, (m, n))
assert_equal(r.shape, (m, m))
def test_check_finite(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
r,q = rq(a, check_finite=False)
assert_array_almost_equal(dot(q, transpose(q)),identity(3))
assert_array_almost_equal(dot(r,q),a)
transp = transpose
any = sometrue
class TestSchur(object):
def test_simple(self):
a = [[8,12,3],[2,9,3],[10,3,6]]
t,z = schur(a)
assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)
tc,zc = schur(a,'complex')
assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc))))
assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a)
tc2,zc2 = rsf2csf(tc,zc)
assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a)
def test_sort(self):
a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]
s,u,sdim = schur(a,sort='lhp')
assert_array_almost_equal([[0.1134,0.5436,0.8316,0.],
[-0.1134,-0.8245,0.5544,0.],
[-0.8213,0.1308,0.0265,-0.5547],
[-0.5475,0.0872,0.0177,0.8321]],
u,3)
assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174],
[0.,-0.5000,9.4472,-0.7184],
[0.,0.,1.4142,-0.1456],
[0.,0.,0.,0.5]],
s,3)
assert_equal(2,sdim)
s,u,sdim = schur(a,sort='rhp')
assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],
[-0.4862,0.4930,-0.1434,-0.7071],
[0.6042,0.3944,-0.6924,0.],
[0.4028,0.5986,0.6924,0.]],
u,3)
assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],
[0.,0.5,6.5809,-3.1870],
[0.,0.,-1.4142,0.9270],
[0.,0.,0.,-0.5]],
s,3)
assert_equal(2,sdim)
s,u,sdim = schur(a,sort='iuc')
assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042],
[-0.8321,0.,-0.3814,-0.4028],
[0.,0.7071,-0.5134,0.4862],
[0.,0.7071,0.5134,-0.4862]],
u,3)
assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974],
[0.,0.5000,-3.3191,-14.4130],
[0.,0.,1.4142,2.1573],
[0.,0.,0.,-1.4142]],
s,3)
assert_equal(2,sdim)
s,u,sdim = schur(a,sort='ouc')
assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.],
[-0.4862,0.5134,0.7071,0.],
[0.6042,0.5721,0.,-0.5547],
[0.4028,0.3814,0.,0.8321]],
u,3)
assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974],
[0.,-1.4142,3.3191,6.5809],
[0.,0.,-0.5000,0.],
[0.,0.,0.,0.5000]],
s,3)
assert_equal(2,sdim)
rhp_function = lambda x: x >= 0.0
s,u,sdim = schur(a,sort=rhp_function)
assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],
[-0.4862,0.4930,-0.1434,-0.7071],
[0.6042,0.3944,-0.6924,0.],
[0.4028,0.5986,0.6924,0.]],
u,3)
assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],
[0.,0.5,6.5809,-3.1870],
[0.,0.,-1.4142,0.9270],
[0.,0.,0.,-0.5]],
s,3)
assert_equal(2,sdim)
def test_sort_errors(self):
a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]
assert_raises(ValueError, schur, a, sort='unsupported')
assert_raises(ValueError, schur, a, sort=1)
def test_check_finite(self):
a = [[8,12,3],[2,9,3],[10,3,6]]
t,z = schur(a, check_finite=False)
assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)
class TestHessenberg(object):
def test_simple(self):
a = [[-149, -50,-154],
[537, 180, 546],
[-27, -9, -25]]
h1 = [[-149.0000,42.2037,-156.3165],
[-537.6783,152.5511,-554.9272],
[0,0.0728, 2.4489]]
h,q = hessenberg(a,calc_q=1)
assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
assert_array_almost_equal(h,h1,decimal=4)
def test_simple_complex(self):
a = [[-149, -50,-154],
[537, 180j, 546],
[-27j, -9, -25]]
h,q = hessenberg(a,calc_q=1)
h1 = dot(transp(conj(q)),dot(a,q))
assert_array_almost_equal(h1,h)
def test_simple2(self):
a = [[1,2,3,4,5,6,7],
[0,2,3,4,6,7,2],
[0,2,2,3,0,3,2],
[0,0,2,8,0,0,2],
[0,3,1,2,0,1,2],
[0,1,2,3,0,1,0],
[0,0,0,0,0,1,2]]
h,q = hessenberg(a,calc_q=1)
assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
def test_simple3(self):
a = np.eye(3)
a[-1, 0] = 2
h, q = hessenberg(a, calc_q=1)
assert_array_almost_equal(dot(transp(q), dot(a, q)), h)
def test_random(self):
n = 20
for k in range(2):
a = random([n,n])
h,q = hessenberg(a,calc_q=1)
assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
def test_random_complex(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
h,q = hessenberg(a,calc_q=1)
h1 = dot(transp(conj(q)),dot(a,q))
assert_array_almost_equal(h1,h)
def test_check_finite(self):
a = [[-149, -50,-154],
[537, 180, 546],
[-27, -9, -25]]
h1 = [[-149.0000,42.2037,-156.3165],
[-537.6783,152.5511,-554.9272],
[0,0.0728, 2.4489]]
h,q = hessenberg(a,calc_q=1, check_finite=False)
assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
assert_array_almost_equal(h,h1,decimal=4)
def test_2x2(self):
a = [[2, 1], [7, 12]]
h, q = hessenberg(a, calc_q=1)
assert_array_almost_equal(q, np.eye(2))
assert_array_almost_equal(h, a)
b = [[2-7j, 1+2j], [7+3j, 12-2j]]
h2, q2 = hessenberg(b, calc_q=1)
assert_array_almost_equal(q2, np.eye(2))
assert_array_almost_equal(h2, b)
class TestQZ(object):
def setup_method(self):
seed(12345)
def test_qz_single(self):
n = 5
A = random([n,n]).astype(float32)
B = random([n,n]).astype(float32)
AA,BB,Q,Z = qz(A,B)
assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
assert_array_almost_equal(dot(Q,Q.T), eye(n))
assert_array_almost_equal(dot(Z,Z.T), eye(n))
assert_(all(diag(BB) >= 0))
def test_qz_double(self):
n = 5
A = random([n,n])
B = random([n,n])
AA,BB,Q,Z = qz(A,B)
assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
assert_array_almost_equal(dot(Q,Q.T), eye(n))
assert_array_almost_equal(dot(Z,Z.T), eye(n))
assert_(all(diag(BB) >= 0))
def test_qz_complex(self):
n = 5
A = random([n,n]) + 1j*random([n,n])
B = random([n,n]) + 1j*random([n,n])
AA,BB,Q,Z = qz(A,B)
assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A)
assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B)
assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))
assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))
assert_(all(diag(BB) >= 0))
assert_(all(diag(BB).imag == 0))
def test_qz_complex64(self):
n = 5
A = (random([n,n]) + 1j*random([n,n])).astype(complex64)
B = (random([n,n]) + 1j*random([n,n])).astype(complex64)
AA,BB,Q,Z = qz(A,B)
assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A, decimal=5)
assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B, decimal=5)
assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n), decimal=5)
assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n), decimal=5)
assert_(all(diag(BB) >= 0))
assert_(all(diag(BB).imag == 0))
def test_qz_double_complex(self):
n = 5
A = random([n,n])
B = random([n,n])
AA,BB,Q,Z = qz(A,B, output='complex')
aa = dot(dot(Q,AA),Z.conjugate().T)
assert_array_almost_equal(aa.real, A)
assert_array_almost_equal(aa.imag, 0)
bb = dot(dot(Q,BB),Z.conjugate().T)
assert_array_almost_equal(bb.real, B)
assert_array_almost_equal(bb.imag, 0)
assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))
assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))
assert_(all(diag(BB) >= 0))
def test_qz_double_sort(self):
# from http://www.nag.com/lapack-ex/node119.html
# NOTE: These matrices may be ill-conditioned and lead to a
# seg fault on certain python versions when compiled with
# sse2 or sse3 older ATLAS/LAPACK binaries for windows
# A = np.array([[3.9, 12.5, -34.5, -0.5],
# [ 4.3, 21.5, -47.5, 7.5],
# [ 4.3, 21.5, -43.5, 3.5],
# [ 4.4, 26.0, -46.0, 6.0 ]])
# B = np.array([[ 1.0, 2.0, -3.0, 1.0],
# [1.0, 3.0, -5.0, 4.0],
# [1.0, 3.0, -4.0, 3.0],
# [1.0, 3.0, -4.0, 4.0]])
A = np.array([[3.9, 12.5, -34.5, 2.5],
[4.3, 21.5, -47.5, 7.5],
[4.3, 1.5, -43.5, 3.5],
[4.4, 6.0, -46.0, 6.0]])
B = np.array([[1.0, 1.0, -3.0, 1.0],
[1.0, 3.0, -5.0, 4.4],
[1.0, 2.0, -4.0, 1.0],
[1.2, 3.0, -4.0, 4.0]])
sort = lambda ar,ai,beta: ai == 0
assert_raises(ValueError, qz, A, B, sort=sort)
if False:
AA,BB,Q,Z,sdim = qz(A,B,sort=sort)
# assert_(sdim == 2)
assert_(sdim == 4)
assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
# test absolute values bc the sign is ambiguous and might be platform
# dependent
assert_array_almost_equal(np.abs(AA), np.abs(np.array(
[[35.7864, -80.9061, -12.0629, -9.498],
[0., 2.7638, -2.3505, 7.3256],
[0., 0., 0.6258, -0.0398],
[0., 0., 0., -12.8217]])), 4)
assert_array_almost_equal(np.abs(BB), np.abs(np.array(
[[4.5324, -8.7878, 3.2357, -3.5526],
[0., 1.4314, -2.1894, 0.9709],
[0., 0., 1.3126, -0.3468],
[0., 0., 0., 0.559]])), 4)
assert_array_almost_equal(np.abs(Q), np.abs(np.array(
[[-0.4193, -0.605, -0.1894, -0.6498],
[-0.5495, 0.6987, 0.2654, -0.3734],
[-0.4973, -0.3682, 0.6194, 0.4832],
[-0.5243, 0.1008, -0.7142, 0.4526]])), 4)
assert_array_almost_equal(np.abs(Z), np.abs(np.array(
[[-0.9471, -0.2971, -0.1217, 0.0055],
[-0.0367, 0.1209, 0.0358, 0.9913],
[0.3171, -0.9041, -0.2547, 0.1312],
[0.0346, 0.2824, -0.9587, 0.0014]])), 4)
# test absolute values bc the sign is ambiguous and might be platform
# dependent
# assert_array_almost_equal(abs(AA), abs(np.array([
# [3.8009, -69.4505, 50.3135, -43.2884],
# [0.0000, 9.2033, -0.2001, 5.9881],
# [0.0000, 0.0000, 1.4279, 4.4453],
# [0.0000, 0.0000, 0.9019, -1.1962]])), 4)
# assert_array_almost_equal(abs(BB), abs(np.array([
# [1.9005, -10.2285, 0.8658, -5.2134],
# [0.0000, 2.3008, 0.7915, 0.4262],
# [0.0000, 0.0000, 0.8101, 0.0000],
# [0.0000, 0.0000, 0.0000, -0.2823]])), 4)
# assert_array_almost_equal(abs(Q), abs(np.array([
# [0.4642, 0.7886, 0.2915, -0.2786],
# [0.5002, -0.5986, 0.5638, -0.2713],
# [0.5002, 0.0154, -0.0107, 0.8657],
# [0.5331, -0.1395, -0.7727, -0.3151]])), 4)
# assert_array_almost_equal(dot(Q,Q.T), eye(4))
# assert_array_almost_equal(abs(Z), abs(np.array([
# [0.9961, -0.0014, 0.0887, -0.0026],
# [0.0057, -0.0404, -0.0938, -0.9948],
# [0.0626, 0.7194, -0.6908, 0.0363],
# [0.0626, -0.6934, -0.7114, 0.0956]])), 4)
# assert_array_almost_equal(dot(Z,Z.T), eye(4))
# def test_qz_complex_sort(self):
# cA = np.array([
# [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j],
# [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],
# [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],
# [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])
# cB = np.array([
# [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j],
# [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j],
# [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],
# [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])
# AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')
# eigenvalues = diag(AAS)/diag(BBS)
# assert_(all(np.real(eigenvalues[:sdim] < 0)))
# assert_(all(np.real(eigenvalues[sdim:] > 0)))
def test_check_finite(self):
n = 5
A = random([n,n])
B = random([n,n])
AA,BB,Q,Z = qz(A,B,check_finite=False)
assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
assert_array_almost_equal(dot(Q,Q.T), eye(n))
assert_array_almost_equal(dot(Z,Z.T), eye(n))
assert_(all(diag(BB) >= 0))
def _make_pos(X):
# the decompositions can have different signs than verified results
return np.sign(X)*X
class TestOrdQZ(object):
@classmethod
def setup_class(cls):
# http://www.nag.com/lapack-ex/node119.html
A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j,
7.5 + 0.5j],
[-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j,
-10.5 - 1.5j],
[4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j,
-7.5 - 3.5j],
[5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j,
-19.0 - 32.5j]])
B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j],
[0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j],
[1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j],
[0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]])
# http://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml
A2 = np.array([[3.9, 12.5, -34.5, -0.5],
[4.3, 21.5, -47.5, 7.5],
[4.3, 21.5, -43.5, 3.5],
[4.4, 26.0, -46.0, 6.0]])
B2 = np.array([[1, 2, -3, 1],
[1, 3, -5, 4],
[1, 3, -4, 3],
[1, 3, -4, 4]])
# example with the eigenvalues
# -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j,
# 0.61244091
# thus featuring:
# * one complex conjugate eigenvalue pair,
# * one eigenvalue in the lhp
# * 2 eigenvalues in the unit circle
# * 2 non-real eigenvalues
A3 = np.array([[5., 1., 3., 3.],
[4., 4., 2., 7.],
[7., 4., 1., 3.],
[0., 4., 8., 7.]])
B3 = np.array([[8., 10., 6., 10.],
[7., 7., 2., 9.],
[9., 1., 6., 6.],
[5., 1., 4., 7.]])
# example with infinite eigenvalues
A4 = np.eye(2)
B4 = np.diag([0, 1])
# example with (alpha, beta) = (0, 0)
A5 = np.diag([1, 0])
B5 = np.diag([1, 0])
cls.A = [A1, A2, A3, A4, A5]
cls.B = [B1, B2, B3, B4, A5]
def qz_decomp(self, sort):
try:
olderr = np.seterr('raise')
ret = [ordqz(Ai, Bi, sort=sort) for Ai, Bi in zip(self.A, self.B)]
finally:
np.seterr(**olderr)
return tuple(ret)
def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z):
Id = np.eye(*A.shape)
# make sure Q and Z are orthogonal
assert_array_almost_equal(Q.dot(Q.T.conj()), Id)
assert_array_almost_equal(Z.dot(Z.T.conj()), Id)
# check factorization
assert_array_almost_equal(Q.dot(AA), A.dot(Z))
assert_array_almost_equal(Q.dot(BB), B.dot(Z))
# check shape of AA and BB
assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape))
assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape))
# check eigenvalues
for i in range(A.shape[0]):
# does the current diagonal element belong to a 2-by-2 block
# that was already checked?
if i > 0 and A[i, i - 1] != 0:
continue
# take care of 2-by-2 blocks
if i < AA.shape[0] - 1 and AA[i + 1, i] != 0:
evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2])
# make sure the pair of complex conjugate eigenvalues
# is ordered consistently (positive imaginary part first)
if evals[0].imag < 0:
evals = evals[[1, 0]]
tmp = alpha[i:i + 2]/beta[i:i + 2]
if tmp[0].imag < 0:
tmp = tmp[[1, 0]]
assert_array_almost_equal(evals, tmp)
else:
if alpha[i] == 0 and beta[i] == 0:
assert_equal(AA[i, i], 0)
assert_equal(BB[i, i], 0)
elif beta[i] == 0:
assert_equal(BB[i, i], 0)
else:
assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i])
sortfun = _select_function(sort)
lastsort = True
for i in range(A.shape[0]):
cursort = sortfun(np.array([alpha[i]]), np.array([beta[i]]))
# once the sorting criterion was not matched all subsequent
# eigenvalues also shouldn't match
if not lastsort:
assert(not cursort)
lastsort = cursort
def check_all(self, sort):
ret = self.qz_decomp(sort)
for reti, Ai, Bi in zip(ret, self.A, self.B):
self.check(Ai, Bi, sort, *reti)
def test_lhp(self):
self.check_all('lhp')
def test_rhp(self):
self.check_all('rhp')
def test_iuc(self):
self.check_all('iuc')
def test_ouc(self):
self.check_all('ouc')
def test_ref(self):
# real eigenvalues first (top-left corner)
def sort(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
out[~nonzero] = False
out[nonzero] = (x[nonzero]/y[nonzero]).imag == 0
return out
self.check_all(sort)
def test_cef(self):
# complex eigenvalues first (top-left corner)
def sort(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
out[~nonzero] = False
out[nonzero] = (x[nonzero]/y[nonzero]).imag != 0
return out
self.check_all(sort)
def test_diff_input_types(self):
ret = ordqz(self.A[1], self.B[2], sort='lhp')
self.check(self.A[1], self.B[2], 'lhp', *ret)
ret = ordqz(self.B[2], self.A[1], sort='lhp')
self.check(self.B[2], self.A[1], 'lhp', *ret)
def test_sort_explicit(self):
# Test order of the eigenvalues in the 2 x 2 case where we can
# explicitly compute the solution
A1 = np.eye(2)
B1 = np.diag([-2, 0.5])
expected1 = [('lhp', [-0.5, 2]),
('rhp', [2, -0.5]),
('iuc', [-0.5, 2]),
('ouc', [2, -0.5])]
A2 = np.eye(2)
B2 = np.diag([-2 + 1j, 0.5 + 0.5j])
expected2 = [('lhp', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
('rhp', [1/(0.5 + 0.5j), 1/(-2 + 1j)]),
('iuc', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
('ouc', [1/(0.5 + 0.5j), 1/(-2 + 1j)])]
# 'lhp' is ambiguous so don't test it
A3 = np.eye(2)
B3 = np.diag([2, 0])
expected3 = [('rhp', [0.5, np.inf]),
('iuc', [0.5, np.inf]),
('ouc', [np.inf, 0.5])]
# 'rhp' is ambiguous so don't test it
A4 = np.eye(2)
B4 = np.diag([-2, 0])
expected4 = [('lhp', [-0.5, np.inf]),
('iuc', [-0.5, np.inf]),
('ouc', [np.inf, -0.5])]
A5 = np.diag([0, 1])
B5 = np.diag([0, 0.5])
# 'lhp' and 'iuc' are ambiguous so don't test them
expected5 = [('rhp', [2, np.nan]),
('ouc', [2, np.nan])]
A = [A1, A2, A3, A4, A5]
B = [B1, B2, B3, B4, B5]
expected = [expected1, expected2, expected3, expected4, expected5]
for Ai, Bi, expectedi in zip(A, B, expected):
for sortstr, expected_eigvals in expectedi:
_, _, alpha, beta, _, _ = ordqz(Ai, Bi, sort=sortstr)
azero = (alpha == 0)
bzero = (beta == 0)
x = np.empty_like(alpha)
x[azero & bzero] = np.nan
x[~azero & bzero] = np.inf
x[~bzero] = alpha[~bzero]/beta[~bzero]
assert_allclose(expected_eigvals, x)
class TestOrdQZWorkspaceSize(object):
def setup_method(self):
seed(12345)
def test_decompose(self):
N = 202
# raises error if lwork parameter to dtrsen is too small
for ddtype in [np.float32, np.float64]:
A = random((N,N)).astype(ddtype)
B = random((N,N)).astype(ddtype)
# sort = lambda alphar, alphai, beta: alphar**2 + alphai**2< beta**2
sort = lambda alpha, beta: alpha < beta
[S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='real')
for ddtype in [np.complex, np.complex64]:
A = random((N,N)).astype(ddtype)
B = random((N,N)).astype(ddtype)
sort = lambda alpha, beta: alpha < beta
[S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='complex')
@pytest.mark.slow
def test_decompose_ouc(self):
N = 202
# segfaults if lwork parameter to dtrsen is too small
for ddtype in [np.float32, np.float64, np.complex, np.complex64]:
A = random((N,N)).astype(ddtype)
B = random((N,N)).astype(ddtype)
[S,T,alpha,beta,U,V] = ordqz(A,B,sort='ouc')
class TestDatacopied(object):
def test_datacopied(self):
from scipy.linalg.decomp import _datacopied
M = matrix([[0,1],[2,3]])
A = asarray(M)
L = M.tolist()
M2 = M.copy()
class Fake1:
def __array__(self):
return A
class Fake2:
__array_interface__ = A.__array_interface__
F1 = Fake1()
F2 = Fake2()
for item, status in [(M, False), (A, False), (L, True),
(M2, False), (F1, False), (F2, False)]:
arr = asarray(item)
assert_equal(_datacopied(arr, item), status,
err_msg=repr(item))
def test_aligned_mem_float():
"""Check linalg works with non-aligned memory"""
# Allocate 402 bytes of memory (allocated on boundary)
a = arange(402, dtype=np.uint8)
# Create an array with boundary offset 4
z = np.frombuffer(a.data, offset=2, count=100, dtype=float32)
z.shape = 10, 10
eig(z, overwrite_a=True)
eig(z.T, overwrite_a=True)
def test_aligned_mem():
"""Check linalg works with non-aligned memory"""
# Allocate 804 bytes of memory (allocated on boundary)
a = arange(804, dtype=np.uint8)
# Create an array with boundary offset 4
z = np.frombuffer(a.data, offset=4, count=100, dtype=float)
z.shape = 10, 10
eig(z, overwrite_a=True)
eig(z.T, overwrite_a=True)
def test_aligned_mem_complex():
"""Check that complex objects don't need to be completely aligned"""
# Allocate 1608 bytes of memory (allocated on boundary)
a = zeros(1608, dtype=np.uint8)
# Create an array with boundary offset 8
z = np.frombuffer(a.data, offset=8, count=100, dtype=complex)
z.shape = 10, 10
eig(z, overwrite_a=True)
# This does not need special handling
eig(z.T, overwrite_a=True)
def check_lapack_misaligned(func, args, kwargs):
args = list(args)
for i in range(len(args)):
a = args[:]
if isinstance(a[i],np.ndarray):
# Try misaligning a[i]
aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8)
aa = np.frombuffer(aa.data, offset=4, count=a[i].size, dtype=a[i].dtype)
aa.shape = a[i].shape
aa[...] = a[i]
a[i] = aa
func(*a,**kwargs)
if len(a[i].shape) > 1:
a[i] = a[i].T
func(*a,**kwargs)
@pytest.mark.xfail(run=False, reason="Ticket #1152, triggers a segfault in rare cases.")
def test_lapack_misaligned():
M = np.eye(10,dtype=float)
R = np.arange(100)
R.shape = 10,10
S = np.arange(20000,dtype=np.uint8)
S = np.frombuffer(S.data, offset=4, count=100, dtype=float)
S.shape = 10, 10
b = np.ones(10)
LU, piv = lu_factor(S)
for (func, args, kwargs) in [
(eig,(S,),dict(overwrite_a=True)), # crash
(eigvals,(S,),dict(overwrite_a=True)), # no crash
(lu,(S,),dict(overwrite_a=True)), # no crash
(lu_factor,(S,),dict(overwrite_a=True)), # no crash
(lu_solve,((LU,piv),b),dict(overwrite_b=True)),
(solve,(S,b),dict(overwrite_a=True,overwrite_b=True)),
(svd,(M,),dict(overwrite_a=True)), # no crash
(svd,(R,),dict(overwrite_a=True)), # no crash
(svd,(S,),dict(overwrite_a=True)), # crash
(svdvals,(S,),dict()), # no crash
(svdvals,(S,),dict(overwrite_a=True)), # crash
(cholesky,(M,),dict(overwrite_a=True)), # no crash
(qr,(S,),dict(overwrite_a=True)), # crash
(rq,(S,),dict(overwrite_a=True)), # crash
(hessenberg,(S,),dict(overwrite_a=True)), # crash
(schur,(S,),dict(overwrite_a=True)), # crash
]:
check_lapack_misaligned(func, args, kwargs)
# not properly tested
# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd
class TestOverwrite(object):
def test_eig(self):
assert_no_overwrite(eig, [(3,3)])
assert_no_overwrite(eig, [(3,3), (3,3)])
def test_eigh(self):
assert_no_overwrite(eigh, [(3,3)])
assert_no_overwrite(eigh, [(3,3), (3,3)])
def test_eig_banded(self):
assert_no_overwrite(eig_banded, [(3,2)])
def test_eigvals(self):
assert_no_overwrite(eigvals, [(3,3)])
def test_eigvalsh(self):
assert_no_overwrite(eigvalsh, [(3,3)])
def test_eigvals_banded(self):
assert_no_overwrite(eigvals_banded, [(3,2)])
def test_hessenberg(self):
assert_no_overwrite(hessenberg, [(3,3)])
def test_lu_factor(self):
assert_no_overwrite(lu_factor, [(3,3)])
def test_lu_solve(self):
x = np.array([[1,2,3], [4,5,6], [7,8,8]])
xlu = lu_factor(x)
assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])
def test_lu(self):
assert_no_overwrite(lu, [(3,3)])
def test_qr(self):
assert_no_overwrite(qr, [(3,3)])
def test_rq(self):
assert_no_overwrite(rq, [(3,3)])
def test_schur(self):
assert_no_overwrite(schur, [(3,3)])
def test_schur_complex(self):
assert_no_overwrite(lambda a: schur(a, 'complex'), [(3,3)],
dtypes=[np.float32, np.float64])
def test_svd(self):
assert_no_overwrite(svd, [(3,3)])
assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3,3)])
def test_svdvals(self):
assert_no_overwrite(svdvals, [(3,3)])
def _check_orth(n, dtype, skip_big=False):
X = np.ones((n, 2), dtype=float).astype(dtype)
eps = np.finfo(dtype).eps
tol = 1000 * eps
Y = orth(X)
assert_equal(Y.shape, (n, 1))
assert_allclose(Y, Y.mean(), atol=tol)
Y = orth(X.T)
assert_equal(Y.shape, (2, 1))
assert_allclose(Y, Y.mean(), atol=tol)
if n > 5 and not skip_big:
np.random.seed(1)
X = np.random.rand(n, 5).dot(np.random.rand(5, n))
X = X + 1e-4 * np.random.rand(n, 1).dot(np.random.rand(1, n))
X = X.astype(dtype)
Y = orth(X, rcond=1e-3)
assert_equal(Y.shape, (n, 5))
Y = orth(X, rcond=1e-6)
assert_equal(Y.shape, (n, 5 + 1))
@pytest.mark.slow
@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8, reason="test only on 64-bit, else too slow")
def test_orth_memory_efficiency():
# Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable.
# Keep in mind that @pytest.mark.slow tests are likely to be running
# under configurations that support 4Gb+ memory for tests related to
# 32 bit overflow.
n = 10*1000*1000
try:
_check_orth(n, np.float64, skip_big=True)
except MemoryError:
raise AssertionError('memory error perhaps caused by orth regression')
def test_orth():
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
sizes = [1, 2, 3, 10, 100]
for dt, n in itertools.product(dtypes, sizes):
_check_orth(n, dt)
def test_null_space():
np.random.seed(1)
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
sizes = [1, 2, 3, 10, 100]
for dt, n in itertools.product(dtypes, sizes):
X = np.ones((2, n), dtype=dt)
eps = np.finfo(dt).eps
tol = 1000 * eps
Y = null_space(X)
assert_equal(Y.shape, (n, n-1))
assert_allclose(X.dot(Y), 0, atol=tol)
Y = null_space(X.T)
assert_equal(Y.shape, (2, 1))
assert_allclose(X.T.dot(Y), 0, atol=tol)
X = np.random.randn(1 + n//2, n)
Y = null_space(X)
assert_equal(Y.shape, (n, n - 1 - n//2))
assert_allclose(X.dot(Y), 0, atol=tol)
if n > 5:
np.random.seed(1)
X = np.random.rand(n, 5).dot(np.random.rand(5, n))
X = X + 1e-4 * np.random.rand(n, 1).dot(np.random.rand(1, n))
X = X.astype(dt)
Y = null_space(X, rcond=1e-3)
assert_equal(Y.shape, (n, n - 5))
Y = null_space(X, rcond=1e-6)
assert_equal(Y.shape, (n, n - 6))
def test_subspace_angles():
H = hadamard(8, float)
A = H[:, :3]
B = H[:, 3:]
assert_allclose(subspace_angles(A, B), [np.pi / 2.] * 3, atol=1e-14)
assert_allclose(subspace_angles(B, A), [np.pi / 2.] * 3, atol=1e-14)
for x in (A, B):
assert_allclose(subspace_angles(x, x), np.zeros(x.shape[1]),
atol=1e-14)
# From MATLAB function "subspace", which effectively only returns the
# last value that we calculate
x = np.array(
[[0.537667139546100, 0.318765239858981, 3.578396939725760, 0.725404224946106], # noqa: E501
[1.833885014595086, -1.307688296305273, 2.769437029884877, -0.063054873189656], # noqa: E501
[-2.258846861003648, -0.433592022305684, -1.349886940156521, 0.714742903826096], # noqa: E501
[0.862173320368121, 0.342624466538650, 3.034923466331855, -0.204966058299775]]) # noqa: E501
expected = 1.481454682101605
assert_allclose(subspace_angles(x[:, :2], x[:, 2:])[0], expected,
rtol=1e-12)
assert_allclose(subspace_angles(x[:, 2:], x[:, :2])[0], expected,
rtol=1e-12)
expected = 0.746361174247302
assert_allclose(subspace_angles(x[:, :2], x[:, [2]]), expected, rtol=1e-12)
assert_allclose(subspace_angles(x[:, [2]], x[:, :2]), expected, rtol=1e-12)
expected = 0.487163718534313
assert_allclose(subspace_angles(x[:, :3], x[:, [3]]), expected, rtol=1e-12)
assert_allclose(subspace_angles(x[:, [3]], x[:, :3]), expected, rtol=1e-12)
expected = 0.328950515907756
assert_allclose(subspace_angles(x[:, :2], x[:, 1:]), [expected, 0],
atol=1e-12)
# Degenerate conditions
assert_raises(ValueError, subspace_angles, x[0], x)
assert_raises(ValueError, subspace_angles, x, x[0])
assert_raises(ValueError, subspace_angles, x[:-1], x)
class TestCDF2RDF(object):
def matmul(self, a, b):
return np.einsum('...ij,...jk->...ik', a, b)
def assert_eig_valid(self, w, v, x):
assert_array_almost_equal(
self.matmul(v, w),
self.matmul(x, v)
)
def test_single_array0x0real(self):
# eig doesn't support 0x0 in old versions of numpy
X = np.empty((0, 0))
w, v = np.empty(0), np.empty((0, 0))
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_single_array2x2_real(self):
X = np.array([[1, 2], [3, -1]])
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_single_array2x2_complex(self):
X = np.array([[1, 2], [-2, 1]])
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_single_array3x3_real(self):
X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_single_array3x3_complex(self):
X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_random_1d_stacked_arrays(self):
# cannot test M == 0 due to bug in old numpy
for M in range(1, 7):
X = np.random.rand(10000, M, M)
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_random_2d_stacked_arrays(self):
# cannot test M == 0 due to bug in old numpy
for M in range(1, 7):
X = np.random.rand(100, 100, M, M)
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_low_dimensionality_error(self):
w, v = np.empty(()), np.array((2,))
assert_raises(ValueError, cdf2rdf, w, v)
def test_not_square_error(self):
# Check that passing a non-square array raises a ValueError.
w, v = np.arange(3), np.arange(6).reshape(3,2)
assert_raises(ValueError, cdf2rdf, w, v)
def test_swapped_v_w_error(self):
# Check that exchanging places of w and v raises ValueError.
X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
w, v = np.linalg.eig(X)
assert_raises(ValueError, cdf2rdf, v, w)
def test_non_associated_error(self):
# Check that passing non-associated eigenvectors raises a ValueError.
w, v = np.arange(3), np.arange(16).reshape(4,4)
assert_raises(ValueError, cdf2rdf, w, v)
def test_not_conjugate_pairs(self):
# Check that passing non-conjugate pairs raises a ValueError.
X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]])
w, v = np.linalg.eig(X)
assert_raises(ValueError, cdf2rdf, w, v)
# different arrays in the stack, so not conjugate
X = np.array([
[[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]],
[[1, 2, 3], [1, 2, 3], [2, 5, 6-1j]],
])
w, v = np.linalg.eig(X)
assert_raises(ValueError, cdf2rdf, w, v)
TST: Reduce size of random matrices to decrease runtime
""" Test functions for linalg.decomp module
"""
from __future__ import division, print_function, absolute_import
__usage__ = """
Build linalg:
python setup_linalg.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
"""
import itertools
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_equal,
assert_, assert_allclose)
import pytest
from pytest import raises as assert_raises
from scipy._lib.six import xrange
from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr,
schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq,
eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, qz, orth, ordqz,
subspace_angles, hadamard, eigvalsh_tridiagonal, eigh_tridiagonal,
null_space, cdf2rdf)
from scipy.linalg.lapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \
dsbev, dsbevd, dsbevx, zhbevd, zhbevx
from scipy.linalg.misc import norm
from scipy.linalg._decomp_qz import _select_function
from numpy import array, transpose, sometrue, diag, ones, linalg, \
argsort, zeros, arange, float32, complex64, dot, conj, identity, \
ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \
asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\
triu, tril
from numpy.random import normal, seed, random
from scipy.linalg._testutils import assert_no_overwrite
# digit precision to use in asserts for different types
DIGITS = {'d':11, 'D':11, 'f':4, 'F':4}
def clear_fuss(ar, fuss_binary_bits=7):
"""Clears trailing `fuss_binary_bits` of mantissa of a floating number"""
x = np.asanyarray(ar)
if np.iscomplexobj(x):
return clear_fuss(x.real) + 1j * clear_fuss(x.imag)
significant_binary_bits = np.finfo(x.dtype).nmant
x_mant, x_exp = np.frexp(x)
f = 2.0**(significant_binary_bits - fuss_binary_bits)
x_mant *= f
np.rint(x_mant, out=x_mant)
x_mant /= f
return np.ldexp(x_mant, x_exp)
# XXX: This function should be available through numpy.testing
def assert_dtype_equal(act, des):
if isinstance(act, ndarray):
act = act.dtype
else:
act = dtype(act)
if isinstance(des, ndarray):
des = des.dtype
else:
des = dtype(des)
assert_(act == des, 'dtype mismatch: "%s" (should be "%s") ' % (act, des))
# XXX: This function should not be defined here, but somewhere in
# scipy.linalg namespace
def symrand(dim_or_eigv):
"""Return a random symmetric (Hermitian) matrix.
If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues
uniformly distributed on (-1,1).
If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose
eigenvalues are 'a'.
"""
if isinstance(dim_or_eigv, int):
dim = dim_or_eigv
d = random(dim)*2 - 1
elif (isinstance(dim_or_eigv, ndarray) and
len(dim_or_eigv.shape) == 1):
dim = dim_or_eigv.shape[0]
d = dim_or_eigv
else:
raise TypeError("input type not supported.")
v = random_rot(dim)
h = dot(dot(v.T.conj(), diag(d)), v)
# to avoid roundoff errors, symmetrize the matrix (again)
h = 0.5*(h.T+h)
return h
# XXX: This function should not be defined here, but somewhere in
# scipy.linalg namespace
def random_rot(dim):
"""Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The algorithm is described in the paper
Stewart, G.W., 'The efficient generation of random orthogonal
matrices with an application to condition estimators', SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization"""
H = eye(dim)
D = ones((dim,))
for n in range(1, dim):
x = normal(size=(dim-n+1,))
D[n-1] = sign(x[0])
x[0] -= D[n-1]*sqrt((x*x).sum())
# Householder transformation
Hx = eye(dim-n+1) - 2.*outer(x, x)/(x*x).sum()
mat = eye(dim)
mat[n-1:,n-1:] = Hx
H = dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = -D.prod()
H = (D*H.T).T
return H
class TestEigVals(object):
def test_simple(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
w = eigvals(a)
exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
assert_array_almost_equal(w,exact_w)
def test_simple_tr(self):
a = array([[1,2,3],[1,2,3],[2,5,6]],'d')
a = transpose(a).copy()
a = transpose(a)
w = eigvals(a)
exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
assert_array_almost_equal(w,exact_w)
def test_simple_complex(self):
a = [[1,2,3],[1,2,3],[2,5,6+1j]]
w = eigvals(a)
exact_w = [(9+1j+sqrt(92+6j))/2,
0,
(9+1j-sqrt(92+6j))/2]
assert_array_almost_equal(w,exact_w)
def test_finite(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
w = eigvals(a, check_finite=False)
exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
assert_array_almost_equal(w,exact_w)
class TestEig(object):
def test_simple(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
w,v = eig(a)
exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
v0 = array([1,1,(1+sqrt(93)/3)/2])
v1 = array([3.,0,-1])
v2 = array([1,1,(1-sqrt(93)/3)/2])
v0 = v0 / sqrt(dot(v0,transpose(v0)))
v1 = v1 / sqrt(dot(v1,transpose(v1)))
v2 = v2 / sqrt(dot(v2,transpose(v2)))
assert_array_almost_equal(w,exact_w)
assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))
assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))
assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))
for i in range(3):
assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])
w,v = eig(a,left=1,right=0)
for i in range(3):
assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i])
def test_simple_complex_eig(self):
a = [[1,2],[-2,1]]
w,vl,vr = eig(a,left=1,right=1)
assert_array_almost_equal(w, array([1+2j, 1-2j]))
for i in range(2):
assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])
for i in range(2):
assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),
conjugate(w[i])*vl[:,i])
def test_simple_complex(self):
a = [[1,2,3],[1,2,3],[2,5,6+1j]]
w,vl,vr = eig(a,left=1,right=1)
for i in range(3):
assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])
for i in range(3):
assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),
conjugate(w[i])*vl[:,i])
def test_gh_3054(self):
a = [[1]]
b = [[0]]
w, vr = eig(a, b, homogeneous_eigvals=True)
assert_allclose(w[1,0], 0)
assert_(w[0,0] != 0)
assert_allclose(vr, 1)
w, vr = eig(a, b)
assert_equal(w, np.inf)
assert_allclose(vr, 1)
def _check_gen_eig(self, A, B):
if B is not None:
A, B = asarray(A), asarray(B)
B0 = B
else:
A = asarray(A)
B0 = B
B = np.eye(*A.shape)
msg = "\n%r\n%r" % (A, B)
# Eigenvalues in homogeneous coordinates
w, vr = eig(A, B0, homogeneous_eigvals=True)
wt = eigvals(A, B0, homogeneous_eigvals=True)
val1 = dot(A, vr) * w[1,:]
val2 = dot(B, vr) * w[0,:]
for i in range(val1.shape[1]):
assert_allclose(val1[:,i], val2[:,i], rtol=1e-13, atol=1e-13, err_msg=msg)
if B0 is None:
assert_allclose(w[1,:], 1)
assert_allclose(wt[1,:], 1)
perm = np.lexsort(w)
permt = np.lexsort(wt)
assert_allclose(w[:,perm], wt[:,permt], atol=1e-7, rtol=1e-7,
err_msg=msg)
length = np.empty(len(vr))
for i in xrange(len(vr)):
length[i] = norm(vr[:,i])
assert_allclose(length, np.ones(length.size), err_msg=msg,
atol=1e-7, rtol=1e-7)
# Convert homogeneous coordinates
beta_nonzero = (w[1,:] != 0)
wh = w[0,beta_nonzero] / w[1,beta_nonzero]
# Eigenvalues in standard coordinates
w, vr = eig(A, B0)
wt = eigvals(A, B0)
val1 = dot(A, vr)
val2 = dot(B, vr) * w
res = val1 - val2
for i in range(res.shape[1]):
if all(isfinite(res[:,i])):
assert_allclose(res[:,i], 0, rtol=1e-13, atol=1e-13, err_msg=msg)
w_fin = w[isfinite(w)]
wt_fin = wt[isfinite(wt)]
perm = argsort(clear_fuss(w_fin))
permt = argsort(clear_fuss(wt_fin))
assert_allclose(w[perm], wt[permt],
atol=1e-7, rtol=1e-7, err_msg=msg)
length = np.empty(len(vr))
for i in xrange(len(vr)):
length[i] = norm(vr[:,i])
assert_allclose(length, np.ones(length.size), err_msg=msg)
# Compare homogeneous and nonhomogeneous versions
assert_allclose(sort(wh), sort(w[np.isfinite(w)]))
@pytest.mark.xfail(reason="See gh-2254.")
def test_singular(self):
# Example taken from
# http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html
A = array(([22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34],
[27,31,26,21,15], [38,44,44,24,30]))
B = array(([13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25],
[16,25,27,14,23], [24,35,18,21,22]))
olderr = np.seterr(all='ignore')
try:
self._check_gen_eig(A, B)
finally:
np.seterr(**olderr)
def test_falker(self):
# Test matrices giving some Nan generalized eigenvalues.
M = diag(array(([1,0,3])))
K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2]))
D = array(([1,-1,0],[-1,1,0],[0,0,0]))
Z = zeros((3,3))
I3 = identity(3)
A = bmat([[I3, Z], [Z, -K]])
B = bmat([[Z, I3], [M, D]])
olderr = np.seterr(all='ignore')
try:
self._check_gen_eig(A, B)
finally:
np.seterr(**olderr)
def test_bad_geneig(self):
# Ticket #709 (strange return values from DGGEV)
def matrices(omega):
c1 = -9 + omega**2
c2 = 2*omega
A = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, c1, 0],
[0, 0, 0, c1]]
B = [[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 0, 0, -c2],
[0, 1, c2, 0]]
return A, B
# With a buggy LAPACK, this can fail for different omega on different
# machines -- so we need to test several values
olderr = np.seterr(all='ignore')
try:
for k in xrange(100):
A, B = matrices(omega=k*5./100)
self._check_gen_eig(A, B)
finally:
np.seterr(**olderr)
def test_make_eigvals(self):
# Step through all paths in _make_eigvals
seed(1234)
# Real eigenvalues
A = symrand(3)
self._check_gen_eig(A, None)
B = symrand(3)
self._check_gen_eig(A, B)
# Complex eigenvalues
A = random((3, 3)) + 1j*random((3, 3))
self._check_gen_eig(A, None)
B = random((3, 3)) + 1j*random((3, 3))
self._check_gen_eig(A, B)
def test_check_finite(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
w,v = eig(a, check_finite=False)
exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
v0 = array([1,1,(1+sqrt(93)/3)/2])
v1 = array([3.,0,-1])
v2 = array([1,1,(1-sqrt(93)/3)/2])
v0 = v0 / sqrt(dot(v0,transpose(v0)))
v1 = v1 / sqrt(dot(v1,transpose(v1)))
v2 = v2 / sqrt(dot(v2,transpose(v2)))
assert_array_almost_equal(w,exact_w)
assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))
assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))
assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))
for i in range(3):
assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])
def test_not_square_error(self):
"""Check that passing a non-square array raises a ValueError."""
A = np.arange(6).reshape(3,2)
assert_raises(ValueError, eig, A)
def test_shape_mismatch(self):
"""Check that passing arrays of with different shapes raises a ValueError."""
A = identity(2)
B = np.arange(9.0).reshape(3,3)
assert_raises(ValueError, eig, A, B)
assert_raises(ValueError, eig, B, A)
class TestEigBanded(object):
def setup_method(self):
self.create_bandmat()
def create_bandmat(self):
"""Create the full matrix `self.fullmat` and
the corresponding band matrix `self.bandmat`."""
N = 10
self.KL = 2 # number of subdiagonals (below the diagonal)
self.KU = 2 # number of superdiagonals (above the diagonal)
# symmetric band matrix
self.sym_mat = (diag(1.0*ones(N))
+ diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1)
+ diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
# hermitian band matrix
self.herm_mat = (diag(-1.0*ones(N))
+ 1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1)
+ diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
# general real band matrix
self.real_mat = (diag(1.0*ones(N))
+ diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 1)
+ diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
# general complex band matrix
self.comp_mat = (1j*diag(1.0*ones(N))
+ diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1)
+ diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))
# Eigenvalues and -vectors from linalg.eig
ew, ev = linalg.eig(self.sym_mat)
ew = ew.real
args = argsort(ew)
self.w_sym_lin = ew[args]
self.evec_sym_lin = ev[:,args]
ew, ev = linalg.eig(self.herm_mat)
ew = ew.real
args = argsort(ew)
self.w_herm_lin = ew[args]
self.evec_herm_lin = ev[:,args]
# Extract upper bands from symmetric and hermitian band matrices
# (for use in dsbevd, dsbevx, zhbevd, zhbevx
# and their single precision versions)
LDAB = self.KU + 1
self.bandmat_sym = zeros((LDAB, N), dtype=float)
self.bandmat_herm = zeros((LDAB, N), dtype=complex)
for i in xrange(LDAB):
self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i)
self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i)
# Extract bands from general real and complex band matrix
# (for use in dgbtrf, dgbtrs and their single precision versions)
LDAB = 2*self.KL + self.KU + 1
self.bandmat_real = zeros((LDAB, N), dtype=float)
self.bandmat_real[2*self.KL,:] = diag(self.real_mat) # diagonal
for i in xrange(self.KL):
# superdiagonals
self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1)
# subdiagonals
self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1)
self.bandmat_comp = zeros((LDAB, N), dtype=complex)
self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat) # diagonal
for i in xrange(self.KL):
# superdiagonals
self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1)
# subdiagonals
self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1)
# absolute value for linear equation system A*x = b
self.b = 1.0*arange(N)
self.bc = self.b * (1 + 1j)
#####################################################################
def test_dsbev(self):
"""Compare dsbev eigenvalues and eigenvectors with
the result of linalg.eig."""
w, evec, info = dsbev(self.bandmat_sym, compute_v=1)
evec_ = evec[:,argsort(w)]
assert_array_almost_equal(sort(w), self.w_sym_lin)
assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
def test_dsbevd(self):
"""Compare dsbevd eigenvalues and eigenvectors with
the result of linalg.eig."""
w, evec, info = dsbevd(self.bandmat_sym, compute_v=1)
evec_ = evec[:,argsort(w)]
assert_array_almost_equal(sort(w), self.w_sym_lin)
assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
def test_dsbevx(self):
"""Compare dsbevx eigenvalues and eigenvectors
with the result of linalg.eig."""
N,N = shape(self.sym_mat)
## Achtung: Argumente 0.0,0.0,range?
w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N,
compute_v=1, range=2)
evec_ = evec[:,argsort(w)]
assert_array_almost_equal(sort(w), self.w_sym_lin)
assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
def test_zhbevd(self):
"""Compare zhbevd eigenvalues and eigenvectors
with the result of linalg.eig."""
w, evec, info = zhbevd(self.bandmat_herm, compute_v=1)
evec_ = evec[:,argsort(w)]
assert_array_almost_equal(sort(w), self.w_herm_lin)
assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))
def test_zhbevx(self):
"""Compare zhbevx eigenvalues and eigenvectors
with the result of linalg.eig."""
N,N = shape(self.herm_mat)
## Achtung: Argumente 0.0,0.0,range?
w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,
compute_v=1, range=2)
evec_ = evec[:,argsort(w)]
assert_array_almost_equal(sort(w), self.w_herm_lin)
assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))
def test_eigvals_banded(self):
"""Compare eigenvalues of eigvals_banded with those of linalg.eig."""
w_sym = eigvals_banded(self.bandmat_sym)
w_sym = w_sym.real
assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
w_herm = eigvals_banded(self.bandmat_herm)
w_herm = w_herm.real
assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
# extracting eigenvalues with respect to an index range
ind1 = 2
ind2 = 6
w_sym_ind = eigvals_banded(self.bandmat_sym,
select='i', select_range=(ind1, ind2))
assert_array_almost_equal(sort(w_sym_ind),
self.w_sym_lin[ind1:ind2+1])
w_herm_ind = eigvals_banded(self.bandmat_herm,
select='i', select_range=(ind1, ind2))
assert_array_almost_equal(sort(w_herm_ind),
self.w_herm_lin[ind1:ind2+1])
# extracting eigenvalues with respect to a value range
v_lower = self.w_sym_lin[ind1] - 1.0e-5
v_upper = self.w_sym_lin[ind2] + 1.0e-5
w_sym_val = eigvals_banded(self.bandmat_sym,
select='v', select_range=(v_lower, v_upper))
assert_array_almost_equal(sort(w_sym_val),
self.w_sym_lin[ind1:ind2+1])
v_lower = self.w_herm_lin[ind1] - 1.0e-5
v_upper = self.w_herm_lin[ind2] + 1.0e-5
w_herm_val = eigvals_banded(self.bandmat_herm,
select='v', select_range=(v_lower, v_upper))
assert_array_almost_equal(sort(w_herm_val),
self.w_herm_lin[ind1:ind2+1])
w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)
w_sym = w_sym.real
assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
def test_eig_banded(self):
"""Compare eigenvalues and eigenvectors of eig_banded
with those of linalg.eig. """
w_sym, evec_sym = eig_banded(self.bandmat_sym)
evec_sym_ = evec_sym[:,argsort(w_sym.real)]
assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))
w_herm, evec_herm = eig_banded(self.bandmat_herm)
evec_herm_ = evec_herm[:,argsort(w_herm.real)]
assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin))
# extracting eigenvalues with respect to an index range
ind1 = 2
ind2 = 6
w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym,
select='i', select_range=(ind1, ind2))
assert_array_almost_equal(sort(w_sym_ind),
self.w_sym_lin[ind1:ind2+1])
assert_array_almost_equal(abs(evec_sym_ind),
abs(self.evec_sym_lin[:,ind1:ind2+1]))
w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm,
select='i', select_range=(ind1, ind2))
assert_array_almost_equal(sort(w_herm_ind),
self.w_herm_lin[ind1:ind2+1])
assert_array_almost_equal(abs(evec_herm_ind),
abs(self.evec_herm_lin[:,ind1:ind2+1]))
# extracting eigenvalues with respect to a value range
v_lower = self.w_sym_lin[ind1] - 1.0e-5
v_upper = self.w_sym_lin[ind2] + 1.0e-5
w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym,
select='v', select_range=(v_lower, v_upper))
assert_array_almost_equal(sort(w_sym_val),
self.w_sym_lin[ind1:ind2+1])
assert_array_almost_equal(abs(evec_sym_val),
abs(self.evec_sym_lin[:,ind1:ind2+1]))
v_lower = self.w_herm_lin[ind1] - 1.0e-5
v_upper = self.w_herm_lin[ind2] + 1.0e-5
w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm,
select='v', select_range=(v_lower, v_upper))
assert_array_almost_equal(sort(w_herm_val),
self.w_herm_lin[ind1:ind2+1])
assert_array_almost_equal(abs(evec_herm_val),
abs(self.evec_herm_lin[:,ind1:ind2+1]))
w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False)
evec_sym_ = evec_sym[:,argsort(w_sym.real)]
assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))
def test_dgbtrf(self):
"""Compare dgbtrf LU factorisation with the LU factorisation result
of linalg.lu."""
M,N = shape(self.real_mat)
lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
# extract matrix u from lu_symm_band
u = diag(lu_symm_band[2*self.KL,:])
for i in xrange(self.KL + self.KU):
u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)
p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0)
assert_array_almost_equal(u, u_lin)
def test_zgbtrf(self):
"""Compare zgbtrf LU factorisation with the LU factorisation result
of linalg.lu."""
M,N = shape(self.comp_mat)
lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
# extract matrix u from lu_symm_band
u = diag(lu_symm_band[2*self.KL,:])
for i in xrange(self.KL + self.KU):
u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)
p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0)
assert_array_almost_equal(u, u_lin)
def test_dgbtrs(self):
"""Compare dgbtrs solutions for linear equation system A*x = b
with solutions of linalg.solve."""
lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv)
y_lin = linalg.solve(self.real_mat, self.b)
assert_array_almost_equal(y, y_lin)
def test_zgbtrs(self):
"""Compare zgbtrs solutions for linear equation system A*x = b
with solutions of linalg.solve."""
lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv)
y_lin = linalg.solve(self.comp_mat, self.bc)
assert_array_almost_equal(y, y_lin)
class TestEigTridiagonal(object):
def setup_method(self):
self.create_trimat()
def create_trimat(self):
"""Create the full matrix `self.fullmat`, `self.d`, and `self.e`."""
N = 10
# symmetric band matrix
self.d = 1.0*ones(N)
self.e = -1.0*ones(N-1)
self.full_mat = (diag(self.d) + diag(self.e, -1) + diag(self.e, 1))
ew, ev = linalg.eig(self.full_mat)
ew = ew.real
args = argsort(ew)
self.w = ew[args]
self.evec = ev[:, args]
def test_degenerate(self):
"""Test error conditions."""
# Wrong sizes
assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e[:-1])
# Must be real
assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e * 1j)
# Bad driver
assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e,
lapack_driver=1.)
assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
lapack_driver='foo')
# Bad bounds
assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
select='i', select_range=(0, -1))
def test_eigvalsh_tridiagonal(self):
"""Compare eigenvalues of eigvalsh_tridiagonal with those of eig."""
# can't use ?STERF with subselection
for driver in ('sterf', 'stev', 'stebz', 'stemr', 'auto'):
w = eigvalsh_tridiagonal(self.d, self.e, lapack_driver=driver)
assert_array_almost_equal(sort(w), self.w)
for driver in ('sterf', 'stev'):
assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
lapack_driver='stev', select='i',
select_range=(0, 1))
for driver in ('stebz', 'stemr', 'auto'):
# extracting eigenvalues with respect to the full index range
w_ind = eigvalsh_tridiagonal(
self.d, self.e, select='i', select_range=(0, len(self.d)-1),
lapack_driver=driver)
assert_array_almost_equal(sort(w_ind), self.w)
# extracting eigenvalues with respect to an index range
ind1 = 2
ind2 = 6
w_ind = eigvalsh_tridiagonal(
self.d, self.e, select='i', select_range=(ind1, ind2),
lapack_driver=driver)
assert_array_almost_equal(sort(w_ind), self.w[ind1:ind2+1])
# extracting eigenvalues with respect to a value range
v_lower = self.w[ind1] - 1.0e-5
v_upper = self.w[ind2] + 1.0e-5
w_val = eigvalsh_tridiagonal(
self.d, self.e, select='v', select_range=(v_lower, v_upper),
lapack_driver=driver)
assert_array_almost_equal(sort(w_val), self.w[ind1:ind2+1])
def test_eigh_tridiagonal(self):
"""Compare eigenvalues and eigenvectors of eigh_tridiagonal
with those of eig. """
# can't use ?STERF when eigenvectors are requested
assert_raises(ValueError, eigh_tridiagonal, self.d, self.e,
lapack_driver='sterf')
for driver in ('stebz', 'stev', 'stemr', 'auto'):
w, evec = eigh_tridiagonal(self.d, self.e, lapack_driver=driver)
evec_ = evec[:, argsort(w)]
assert_array_almost_equal(sort(w), self.w)
assert_array_almost_equal(abs(evec_), abs(self.evec))
assert_raises(ValueError, eigh_tridiagonal, self.d, self.e,
lapack_driver='stev', select='i', select_range=(0, 1))
for driver in ('stebz', 'stemr', 'auto'):
# extracting eigenvalues with respect to an index range
ind1 = 0
ind2 = len(self.d)-1
w, evec = eigh_tridiagonal(
self.d, self.e, select='i', select_range=(ind1, ind2),
lapack_driver=driver)
assert_array_almost_equal(sort(w), self.w)
assert_array_almost_equal(abs(evec), abs(self.evec))
ind1 = 2
ind2 = 6
w, evec = eigh_tridiagonal(
self.d, self.e, select='i', select_range=(ind1, ind2),
lapack_driver=driver)
assert_array_almost_equal(sort(w), self.w[ind1:ind2+1])
assert_array_almost_equal(abs(evec),
abs(self.evec[:, ind1:ind2+1]))
# extracting eigenvalues with respect to a value range
v_lower = self.w[ind1] - 1.0e-5
v_upper = self.w[ind2] + 1.0e-5
w, evec = eigh_tridiagonal(
self.d, self.e, select='v', select_range=(v_lower, v_upper),
lapack_driver=driver)
assert_array_almost_equal(sort(w), self.w[ind1:ind2+1])
assert_array_almost_equal(abs(evec),
abs(self.evec[:, ind1:ind2+1]))
def test_eigh():
DIM = 6
v = {'dim': (DIM,),
'dtype': ('f','d','F','D'),
'overwrite': (True, False),
'lower': (True, False),
'turbo': (True, False),
'eigvals': (None, (2, DIM-2))}
for dim in v['dim']:
for typ in v['dtype']:
for overwrite in v['overwrite']:
for turbo in v['turbo']:
for eigenvalues in v['eigvals']:
for lower in v['lower']:
eigenhproblem_standard(
'ordinary',
dim, typ, overwrite, lower,
turbo, eigenvalues)
eigenhproblem_general(
'general ',
dim, typ, overwrite, lower,
turbo, eigenvalues)
def test_eigh_of_sparse():
# This tests the rejection of inputs that eigh cannot currently handle.
import scipy.sparse
a = scipy.sparse.identity(2).tocsc()
b = np.atleast_2d(a)
assert_raises(ValueError, eigh, a)
assert_raises(ValueError, eigh, b)
def _complex_symrand(dim, dtype):
a1, a2 = symrand(dim), symrand(dim)
# add antisymmetric matrix as imag part
a = a1 + 1j*(triu(a2)-tril(a2))
return a.astype(dtype)
def eigenhproblem_standard(desc, dim, dtype,
overwrite, lower, turbo,
eigenvalues):
"""Solve a standard eigenvalue problem."""
if iscomplex(empty(1, dtype=dtype)):
a = _complex_symrand(dim, dtype)
else:
a = symrand(dim).astype(dtype)
if overwrite:
a_c = a.copy()
else:
a_c = a
w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigenvalues)
assert_dtype_equal(z.dtype, dtype)
w = w.astype(dtype)
diag_ = diag(dot(z.T.conj(), dot(a_c, z))).real
assert_array_almost_equal(diag_, w, DIGITS[dtype])
def eigenhproblem_general(desc, dim, dtype,
overwrite, lower, turbo,
eigenvalues):
"""Solve a generalized eigenvalue problem."""
if iscomplex(empty(1, dtype=dtype)):
a = _complex_symrand(dim, dtype)
b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype)
else:
a = symrand(dim).astype(dtype)
b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype)
if overwrite:
a_c, b_c = a.copy(), b.copy()
else:
a_c, b_c = a, b
w, z = eigh(a, b, overwrite_a=overwrite, lower=lower,
overwrite_b=overwrite, turbo=turbo, eigvals=eigenvalues)
assert_dtype_equal(z.dtype, dtype)
w = w.astype(dtype)
diag1_ = diag(dot(z.T.conj(), dot(a_c, z))).real
assert_array_almost_equal(diag1_, w, DIGITS[dtype])
diag2_ = diag(dot(z.T.conj(), dot(b_c, z))).real
assert_array_almost_equal(diag2_, ones(diag2_.shape[0]), DIGITS[dtype])
def test_eigh_integer():
a = array([[1,2],[2,7]])
b = array([[3,1],[1,5]])
w,z = eigh(a)
w,z = eigh(a,b)
class TestLU(object):
def setup_method(self):
self.a = array([[1,2,3],[1,2,3],[2,5,6]])
self.ca = array([[1,2,3],[1,2,3],[2,5j,6]])
# Those matrices are more robust to detect problems in permutation
# matrices than the ones above
self.b = array([[1,2,3],[4,5,6],[7,8,9]])
self.cb = array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]])
# Reectangular matrices
self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
self.chrect = 1.j * array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
self.cvrect = 1.j * array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
# Medium sizes matrices
self.med = random((30, 40))
self.cmed = random((30, 40)) + 1.j * random((30, 40))
def _test_common(self, data):
p,l,u = lu(data)
assert_array_almost_equal(dot(dot(p,l),u),data)
pl,u = lu(data,permute_l=1)
assert_array_almost_equal(dot(pl,u),data)
# Simple tests
def test_simple(self):
self._test_common(self.a)
def test_simple_complex(self):
self._test_common(self.ca)
def test_simple2(self):
self._test_common(self.b)
def test_simple2_complex(self):
self._test_common(self.cb)
# rectangular matrices tests
def test_hrectangular(self):
self._test_common(self.hrect)
def test_vrectangular(self):
self._test_common(self.vrect)
def test_hrectangular_complex(self):
self._test_common(self.chrect)
def test_vrectangular_complex(self):
self._test_common(self.cvrect)
# Bigger matrices
def test_medium1(self):
"""Check lu decomposition on medium size, rectangular matrix."""
self._test_common(self.med)
def test_medium1_complex(self):
"""Check lu decomposition on medium size, rectangular matrix."""
self._test_common(self.cmed)
def test_check_finite(self):
p, l, u = lu(self.a, check_finite=False)
assert_array_almost_equal(dot(dot(p,l),u), self.a)
def test_simple_known(self):
# Ticket #1458
for order in ['C', 'F']:
A = np.array([[2, 1],[0, 1.]], order=order)
LU, P = lu_factor(A)
assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]]))
assert_array_equal(P, np.array([0, 1]))
class TestLUSingle(TestLU):
"""LU testers for single precision, real and double"""
def setup_method(self):
TestLU.setup_method(self)
self.a = self.a.astype(float32)
self.ca = self.ca.astype(complex64)
self.b = self.b.astype(float32)
self.cb = self.cb.astype(complex64)
self.hrect = self.hrect.astype(float32)
self.chrect = self.hrect.astype(complex64)
self.vrect = self.vrect.astype(float32)
self.cvrect = self.vrect.astype(complex64)
self.med = self.vrect.astype(float32)
self.cmed = self.vrect.astype(complex64)
class TestLUSolve(object):
def setup_method(self):
seed(1234)
def test_lu(self):
a0 = random((10,10))
b = random((10,))
for order in ['C', 'F']:
a = np.array(a0, order=order)
x1 = solve(a,b)
lu_a = lu_factor(a)
x2 = lu_solve(lu_a,b)
assert_array_almost_equal(x1,x2)
def test_check_finite(self):
a = random((10,10))
b = random((10,))
x1 = solve(a,b)
lu_a = lu_factor(a, check_finite=False)
x2 = lu_solve(lu_a,b, check_finite=False)
assert_array_almost_equal(x1,x2)
class TestSVD_GESDD(object):
def setup_method(self):
self.lapack_driver = 'gesdd'
seed(1234)
def test_degenerate(self):
assert_raises(TypeError, svd, [[1.]], lapack_driver=1.)
assert_raises(ValueError, svd, [[1.]], lapack_driver='foo')
def test_simple(self):
a = [[1,2,3],[1,20,3],[2,5,6]]
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u),identity(3))
assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_simple_singular(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u),identity(3))
assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_simple_underdet(self):
a = [[1,2,3],[4,5,6]]
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[0]))
sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_simple_overdet(self):
a = [[1,2],[4,5],[3,4]]
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1]))
assert_array_almost_equal(dot(transpose(vh),vh),identity(2))
sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_random(self):
n = 20
m = 15
for i in range(3):
for a in [random([n,m]),random([m,n])]:
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1]))
assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0]))
sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_simple_complex(self):
a = [[1,2,3],[1,2j,3],[2,5,6]]
for full_matrices in (True, False):
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))
assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0]))
sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_random_complex(self):
n = 20
m = 15
for i in range(3):
for full_matrices in (True, False):
for a in [random([n,m]),random([m,n])]:
a = a + 1j*random(list(a.shape))
u,s,vh = svd(a, full_matrices=full_matrices,
lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))
# This fails when [m,n]
# assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(len(vh),dtype=vh.dtype.char))
sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_crash_1580(self):
sizes = [(13, 23), (30, 50), (60, 100)]
np.random.seed(1234)
for sz in sizes:
for dt in [np.float32, np.float64, np.complex64, np.complex128]:
a = np.random.rand(*sz).astype(dt)
# should not crash
svd(a, lapack_driver=self.lapack_driver)
def test_check_finite(self):
a = [[1,2,3],[1,20,3],[2,5,6]]
u,s,vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver)
assert_array_almost_equal(dot(transpose(u),u),identity(3))
assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
for i in range(len(s)):
sigma[i,i] = s[i]
assert_array_almost_equal(dot(dot(u,sigma),vh),a)
def test_gh_5039(self):
# This is a smoke test for https://github.com/scipy/scipy/issues/5039
#
# The following is reported to raise "ValueError: On entry to DGESDD
# parameter number 12 had an illegal value".
# `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')`
# This is reported to only show up on LAPACK 3.0.3.
#
# The matrix below is taken from the call to
# `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest
b = np.array(
[[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.],
[0., 0.16666667, 0.66666667, 0.16666667, 0., 0.],
[0., 0., 0.16666667, 0.66666667, 0.16666667, 0.],
[0., 0., 0., 0.16666667, 0.66666667, 0.16666667]])
svd(b, lapack_driver=self.lapack_driver)
class TestSVD_GESVD(TestSVD_GESDD):
def setup_method(self):
self.lapack_driver = 'gesvd'
seed(1234)
class TestSVDVals(object):
def test_empty(self):
for a in [[]], np.empty((2, 0)), np.ones((0, 3)):
s = svdvals(a)
assert_equal(s, np.empty(0))
def test_simple(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
s = svdvals(a)
assert_(len(s) == 3)
assert_(s[0] >= s[1] >= s[2])
def test_simple_underdet(self):
a = [[1,2,3],[4,5,6]]
s = svdvals(a)
assert_(len(s) == 2)
assert_(s[0] >= s[1])
def test_simple_overdet(self):
a = [[1,2],[4,5],[3,4]]
s = svdvals(a)
assert_(len(s) == 2)
assert_(s[0] >= s[1])
def test_simple_complex(self):
a = [[1,2,3],[1,20,3j],[2,5,6]]
s = svdvals(a)
assert_(len(s) == 3)
assert_(s[0] >= s[1] >= s[2])
def test_simple_underdet_complex(self):
a = [[1,2,3],[4,5j,6]]
s = svdvals(a)
assert_(len(s) == 2)
assert_(s[0] >= s[1])
def test_simple_overdet_complex(self):
a = [[1,2],[4,5],[3j,4]]
s = svdvals(a)
assert_(len(s) == 2)
assert_(s[0] >= s[1])
def test_check_finite(self):
a = [[1,2,3],[1,2,3],[2,5,6]]
s = svdvals(a, check_finite=False)
assert_(len(s) == 3)
assert_(s[0] >= s[1] >= s[2])
@pytest.mark.slow
def test_crash_2609(self):
np.random.seed(1234)
a = np.random.rand(1500, 2800)
# Shouldn't crash:
svdvals(a)
class TestDiagSVD(object):
def test_simple(self):
assert_array_almost_equal(diagsvd([1,0,0],3,3),[[1,0,0],[0,0,0],[0,0,0]])
class TestQR(object):
def setup_method(self):
seed(1234)
def test_simple(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_simple_left(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a)
c = [1, 2, 3]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
qc,r2 = qr_multiply(a, identity(3), "left")
assert_array_almost_equal(q, qc)
def test_simple_right(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a)
c = [1, 2, 3]
qc,r2 = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), qc)
assert_array_almost_equal(r, r2)
qc,r = qr_multiply(a, identity(3))
assert_array_almost_equal(q, qc)
def test_simple_pivoting(self):
a = np.asarray([[8,2,3],[2,9,3],[5,3,6]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_left_pivoting(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3]
qc,r,jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(dot(q, c), qc)
def test_simple_right_pivoting(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3]
qc,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), qc)
def test_simple_trap(self):
a = [[8,2,3],[2,9,3]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
def test_simple_trap_pivoting(self):
a = np.asarray([[8,2,3],[2,9,3]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_tall(self):
# full version
a = [[8,2],[2,9],[5,3]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_simple_tall_pivoting(self):
# full version pivoting
a = np.asarray([[8,2],[2,9],[5,3]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_tall_e(self):
# economy version
a = [[8,2],[2,9],[5,3]]
q,r = qr(a, mode='economic')
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (3,2))
assert_equal(r.shape, (2,2))
def test_simple_tall_e_pivoting(self):
# economy version pivoting
a = np.asarray([[8,2],[2,9],[5,3]])
q,r,p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p], mode='economic')
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_tall_left(self):
a = [[8,2],[2,9],[5,3]]
q,r = qr(a, mode="economic")
c = [1, 2]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
c = array([1,2,0])
qc,r2 = qr_multiply(a, c, "left", overwrite_c=True)
assert_array_almost_equal(dot(q, c[:2]), qc)
qc,r = qr_multiply(a, identity(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_tall_left_pivoting(self):
a = [[8,2],[2,9],[5,3]]
q,r,jpvt = qr(a, mode="economic", pivoting=True)
c = [1, 2]
qc,r,kpvt = qr_multiply(a, c, "left", True)
assert_array_equal(jpvt, kpvt)
assert_array_almost_equal(dot(q, c), qc)
qc,r,jpvt = qr_multiply(a, identity(2), "left", True)
assert_array_almost_equal(qc, q)
def test_simple_tall_right(self):
a = [[8,2],[2,9],[5,3]]
q,r = qr(a, mode="economic")
c = [1, 2, 3]
cq,r2 = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
assert_array_almost_equal(r, r2)
cq,r = qr_multiply(a, identity(3))
assert_array_almost_equal(cq, q)
def test_simple_tall_right_pivoting(self):
a = [[8,2],[2,9],[5,3]]
q,r,jpvt = qr(a, pivoting=True, mode="economic")
c = [1, 2, 3]
cq,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), cq)
cq,r,jpvt = qr_multiply(a, identity(3), pivoting=True)
assert_array_almost_equal(cq, q)
def test_simple_fat(self):
# full version
a = [[8,2,5],[2,9,3]]
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
def test_simple_fat_pivoting(self):
# full version pivoting
a = np.asarray([[8,2,5],[2,9,3]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_fat_e(self):
# economy version
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode='economic')
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
def test_simple_fat_e_pivoting(self):
# economy version pivoting
a = np.asarray([[8,2,3],[2,9,5]])
q,r,p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(q,r),a[:,p])
assert_equal(q.shape, (2,2))
assert_equal(r.shape, (2,3))
q2,r2 = qr(a[:,p], mode='economic')
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_fat_left(self):
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode="economic")
c = [1, 2]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
qc,r = qr_multiply(a, identity(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_fat_left_pivoting(self):
a = [[8,2,3],[2,9,5]]
q,r,jpvt = qr(a, mode="economic", pivoting=True)
c = [1, 2]
qc,r,jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(dot(q, c), qc)
qc,r,jpvt = qr_multiply(a, identity(2), "left", True)
assert_array_almost_equal(qc, q)
def test_simple_fat_right(self):
a = [[8,2,3],[2,9,5]]
q,r = qr(a, mode="economic")
c = [1, 2]
cq,r2 = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
assert_array_almost_equal(r, r2)
cq,r = qr_multiply(a, identity(2))
assert_array_almost_equal(cq, q)
def test_simple_fat_right_pivoting(self):
a = [[8,2,3],[2,9,5]]
q,r,jpvt = qr(a, pivoting=True, mode="economic")
c = [1, 2]
cq,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), cq)
cq,r,jpvt = qr_multiply(a, identity(2), pivoting=True)
assert_array_almost_equal(cq, q)
def test_simple_complex(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_simple_complex_left(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(3), "left")
assert_array_almost_equal(q, qc)
def test_simple_complex_right(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), qc)
qc,r = qr_multiply(a, identity(3))
assert_array_almost_equal(q, qc)
def test_simple_tall_complex_left(self):
a = [[8,2+3j],[2,9],[5+7j,3]]
q,r = qr(a, mode="economic")
c = [1, 2+2j]
qc,r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
assert_array_almost_equal(r, r2)
c = array([1,2,0])
qc,r2 = qr_multiply(a, c, "left", overwrite_c=True)
assert_array_almost_equal(dot(q, c[:2]), qc)
qc,r = qr_multiply(a, identity(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_complex_left_conjugate(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c, "left", conjugate=True)
assert_array_almost_equal(dot(q.conjugate(), c), qc)
def test_simple_complex_tall_left_conjugate(self):
a = [[3,3+4j],[5,2+2j],[3,2]]
q,r = qr(a, mode='economic')
c = [1, 3+4j]
qc,r = qr_multiply(a, c, "left", conjugate=True)
assert_array_almost_equal(dot(q.conjugate(), c), qc)
def test_simple_complex_right_conjugate(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
q,r = qr(a)
c = [1, 2, 3+4j]
qc,r = qr_multiply(a, c, conjugate=True)
assert_array_almost_equal(dot(c, q.conjugate()), qc)
def test_simple_complex_pivoting(self):
a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_simple_complex_left_pivoting(self):
a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3+4j]
qc,r,jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(dot(q, c), qc)
def test_simple_complex_right_pivoting(self):
a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
q,r,jpvt = qr(a, pivoting=True)
c = [1, 2, 3+4j]
qc,r,jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(dot(c, q), qc)
def test_random(self):
n = 20
for k in range(2):
a = random([n,n])
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a)
def test_random_left(self):
n = 20
for k in range(2):
a = random([n,n])
q,r = qr(a)
c = random([n])
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(n), "left")
assert_array_almost_equal(q, qc)
def test_random_right(self):
n = 20
for k in range(2):
a = random([n,n])
q,r = qr(a)
c = random([n])
cq,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
cq,r = qr_multiply(a, identity(n))
assert_array_almost_equal(q, cq)
def test_random_pivoting(self):
n = 20
for k in range(2):
a = random([n,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_tall(self):
# full version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a)
def test_random_tall_left(self):
# full version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a, mode="economic")
c = random([n])
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(n), "left")
assert_array_almost_equal(qc, q)
def test_random_tall_right(self):
# full version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a, mode="economic")
c = random([m])
cq,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
cq,r = qr_multiply(a, identity(m))
assert_array_almost_equal(cq, q)
def test_random_tall_pivoting(self):
# full version pivoting
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_tall_e(self):
# economy version
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r = qr(a, mode='economic')
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a)
assert_equal(q.shape, (m,n))
assert_equal(r.shape, (n,n))
def test_random_tall_e_pivoting(self):
# economy version pivoting
m = 200
n = 100
for k in range(2):
a = random([m,n])
q,r,p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(n))
assert_array_almost_equal(dot(q,r),a[:,p])
assert_equal(q.shape, (m,n))
assert_equal(r.shape, (n,n))
q2,r2 = qr(a[:,p], mode='economic')
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_trap(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])
q,r = qr(a)
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a)
def test_random_trap_pivoting(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(transpose(q),q),identity(m))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_random_complex(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r = qr(a)
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))
assert_array_almost_equal(dot(q,r),a)
def test_random_complex_left(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r = qr(a)
c = random([n])+1j*random([n])
qc,r = qr_multiply(a, c, "left")
assert_array_almost_equal(dot(q, c), qc)
qc,r = qr_multiply(a, identity(n), "left")
assert_array_almost_equal(q, qc)
def test_random_complex_right(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r = qr(a)
c = random([n])+1j*random([n])
cq,r = qr_multiply(a, c)
assert_array_almost_equal(dot(c, q), cq)
cq,r = qr_multiply(a, identity(n))
assert_array_almost_equal(q, cq)
def test_random_complex_pivoting(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
q,r,p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(all(d[1:] <= d[:-1]))
assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))
assert_array_almost_equal(dot(q,r),a[:,p])
q2,r2 = qr(a[:,p])
assert_array_almost_equal(q,q2)
assert_array_almost_equal(r,r2)
def test_check_finite(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
q,r = qr(a, check_finite=False)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(q,r),a)
def test_lwork(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
# Get comparison values
q,r = qr(a, lwork=None)
# Test against minimum valid lwork
q2,r2 = qr(a, lwork=3)
assert_array_almost_equal(q2,q)
assert_array_almost_equal(r2,r)
# Test against larger lwork
q3,r3 = qr(a, lwork=10)
assert_array_almost_equal(q3,q)
assert_array_almost_equal(r3,r)
# Test against explicit lwork=-1
q4,r4 = qr(a, lwork=-1)
assert_array_almost_equal(q4,q)
assert_array_almost_equal(r4,r)
# Test against invalid lwork
assert_raises(Exception, qr, (a,), {'lwork':0})
assert_raises(Exception, qr, (a,), {'lwork':2})
class TestRQ(object):
def setup_method(self):
seed(1234)
def test_simple(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
r,q = rq(a)
assert_array_almost_equal(dot(q, transpose(q)),identity(3))
assert_array_almost_equal(dot(r,q),a)
def test_r(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
r,q = rq(a)
r2 = rq(a, mode='r')
assert_array_almost_equal(r, r2)
def test_random(self):
n = 20
for k in range(2):
a = random([n,n])
r,q = rq(a)
assert_array_almost_equal(dot(q, transpose(q)),identity(n))
assert_array_almost_equal(dot(r,q),a)
def test_simple_trap(self):
a = [[8,2,3],[2,9,3]]
r,q = rq(a)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(r,q),a)
def test_simple_tall(self):
a = [[8,2],[2,9],[5,3]]
r,q = rq(a)
assert_array_almost_equal(dot(transpose(q),q),identity(2))
assert_array_almost_equal(dot(r,q),a)
def test_simple_fat(self):
a = [[8,2,5],[2,9,3]]
r,q = rq(a)
assert_array_almost_equal(dot(transpose(q),q),identity(3))
assert_array_almost_equal(dot(r,q),a)
def test_simple_complex(self):
a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
r,q = rq(a)
assert_array_almost_equal(dot(q, conj(transpose(q))),identity(3))
assert_array_almost_equal(dot(r,q),a)
def test_random_tall(self):
m = 200
n = 100
for k in range(2):
a = random([m,n])
r,q = rq(a)
assert_array_almost_equal(dot(q, transpose(q)),identity(n))
assert_array_almost_equal(dot(r,q),a)
def test_random_trap(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])
r,q = rq(a)
assert_array_almost_equal(dot(q, transpose(q)),identity(n))
assert_array_almost_equal(dot(r,q),a)
def test_random_trap_economic(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])
r,q = rq(a, mode='economic')
assert_array_almost_equal(dot(q,transpose(q)),identity(m))
assert_array_almost_equal(dot(r,q),a)
assert_equal(q.shape, (m, n))
assert_equal(r.shape, (m, m))
def test_random_complex(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
r,q = rq(a)
assert_array_almost_equal(dot(q, conj(transpose(q))),identity(n))
assert_array_almost_equal(dot(r,q),a)
def test_random_complex_economic(self):
m = 100
n = 200
for k in range(2):
a = random([m,n])+1j*random([m,n])
r,q = rq(a, mode='economic')
assert_array_almost_equal(dot(q,conj(transpose(q))),identity(m))
assert_array_almost_equal(dot(r,q),a)
assert_equal(q.shape, (m, n))
assert_equal(r.shape, (m, m))
def test_check_finite(self):
a = [[8,2,3],[2,9,3],[5,3,6]]
r,q = rq(a, check_finite=False)
assert_array_almost_equal(dot(q, transpose(q)),identity(3))
assert_array_almost_equal(dot(r,q),a)
transp = transpose
any = sometrue
class TestSchur(object):
def test_simple(self):
a = [[8,12,3],[2,9,3],[10,3,6]]
t,z = schur(a)
assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)
tc,zc = schur(a,'complex')
assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc))))
assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a)
tc2,zc2 = rsf2csf(tc,zc)
assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a)
def test_sort(self):
a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]
s,u,sdim = schur(a,sort='lhp')
assert_array_almost_equal([[0.1134,0.5436,0.8316,0.],
[-0.1134,-0.8245,0.5544,0.],
[-0.8213,0.1308,0.0265,-0.5547],
[-0.5475,0.0872,0.0177,0.8321]],
u,3)
assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174],
[0.,-0.5000,9.4472,-0.7184],
[0.,0.,1.4142,-0.1456],
[0.,0.,0.,0.5]],
s,3)
assert_equal(2,sdim)
s,u,sdim = schur(a,sort='rhp')
assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],
[-0.4862,0.4930,-0.1434,-0.7071],
[0.6042,0.3944,-0.6924,0.],
[0.4028,0.5986,0.6924,0.]],
u,3)
assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],
[0.,0.5,6.5809,-3.1870],
[0.,0.,-1.4142,0.9270],
[0.,0.,0.,-0.5]],
s,3)
assert_equal(2,sdim)
s,u,sdim = schur(a,sort='iuc')
assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042],
[-0.8321,0.,-0.3814,-0.4028],
[0.,0.7071,-0.5134,0.4862],
[0.,0.7071,0.5134,-0.4862]],
u,3)
assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974],
[0.,0.5000,-3.3191,-14.4130],
[0.,0.,1.4142,2.1573],
[0.,0.,0.,-1.4142]],
s,3)
assert_equal(2,sdim)
s,u,sdim = schur(a,sort='ouc')
assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.],
[-0.4862,0.5134,0.7071,0.],
[0.6042,0.5721,0.,-0.5547],
[0.4028,0.3814,0.,0.8321]],
u,3)
assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974],
[0.,-1.4142,3.3191,6.5809],
[0.,0.,-0.5000,0.],
[0.,0.,0.,0.5000]],
s,3)
assert_equal(2,sdim)
rhp_function = lambda x: x >= 0.0
s,u,sdim = schur(a,sort=rhp_function)
assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],
[-0.4862,0.4930,-0.1434,-0.7071],
[0.6042,0.3944,-0.6924,0.],
[0.4028,0.5986,0.6924,0.]],
u,3)
assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],
[0.,0.5,6.5809,-3.1870],
[0.,0.,-1.4142,0.9270],
[0.,0.,0.,-0.5]],
s,3)
assert_equal(2,sdim)
def test_sort_errors(self):
a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]
assert_raises(ValueError, schur, a, sort='unsupported')
assert_raises(ValueError, schur, a, sort=1)
def test_check_finite(self):
a = [[8,12,3],[2,9,3],[10,3,6]]
t,z = schur(a, check_finite=False)
assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)
class TestHessenberg(object):
def test_simple(self):
a = [[-149, -50,-154],
[537, 180, 546],
[-27, -9, -25]]
h1 = [[-149.0000,42.2037,-156.3165],
[-537.6783,152.5511,-554.9272],
[0,0.0728, 2.4489]]
h,q = hessenberg(a,calc_q=1)
assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
assert_array_almost_equal(h,h1,decimal=4)
def test_simple_complex(self):
a = [[-149, -50,-154],
[537, 180j, 546],
[-27j, -9, -25]]
h,q = hessenberg(a,calc_q=1)
h1 = dot(transp(conj(q)),dot(a,q))
assert_array_almost_equal(h1,h)
def test_simple2(self):
a = [[1,2,3,4,5,6,7],
[0,2,3,4,6,7,2],
[0,2,2,3,0,3,2],
[0,0,2,8,0,0,2],
[0,3,1,2,0,1,2],
[0,1,2,3,0,1,0],
[0,0,0,0,0,1,2]]
h,q = hessenberg(a,calc_q=1)
assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
def test_simple3(self):
a = np.eye(3)
a[-1, 0] = 2
h, q = hessenberg(a, calc_q=1)
assert_array_almost_equal(dot(transp(q), dot(a, q)), h)
def test_random(self):
n = 20
for k in range(2):
a = random([n,n])
h,q = hessenberg(a,calc_q=1)
assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
def test_random_complex(self):
n = 20
for k in range(2):
a = random([n,n])+1j*random([n,n])
h,q = hessenberg(a,calc_q=1)
h1 = dot(transp(conj(q)),dot(a,q))
assert_array_almost_equal(h1,h)
def test_check_finite(self):
a = [[-149, -50,-154],
[537, 180, 546],
[-27, -9, -25]]
h1 = [[-149.0000,42.2037,-156.3165],
[-537.6783,152.5511,-554.9272],
[0,0.0728, 2.4489]]
h,q = hessenberg(a,calc_q=1, check_finite=False)
assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
assert_array_almost_equal(h,h1,decimal=4)
def test_2x2(self):
a = [[2, 1], [7, 12]]
h, q = hessenberg(a, calc_q=1)
assert_array_almost_equal(q, np.eye(2))
assert_array_almost_equal(h, a)
b = [[2-7j, 1+2j], [7+3j, 12-2j]]
h2, q2 = hessenberg(b, calc_q=1)
assert_array_almost_equal(q2, np.eye(2))
assert_array_almost_equal(h2, b)
class TestQZ(object):
def setup_method(self):
seed(12345)
def test_qz_single(self):
n = 5
A = random([n,n]).astype(float32)
B = random([n,n]).astype(float32)
AA,BB,Q,Z = qz(A,B)
assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
assert_array_almost_equal(dot(Q,Q.T), eye(n))
assert_array_almost_equal(dot(Z,Z.T), eye(n))
assert_(all(diag(BB) >= 0))
def test_qz_double(self):
n = 5
A = random([n,n])
B = random([n,n])
AA,BB,Q,Z = qz(A,B)
assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
assert_array_almost_equal(dot(Q,Q.T), eye(n))
assert_array_almost_equal(dot(Z,Z.T), eye(n))
assert_(all(diag(BB) >= 0))
def test_qz_complex(self):
n = 5
A = random([n,n]) + 1j*random([n,n])
B = random([n,n]) + 1j*random([n,n])
AA,BB,Q,Z = qz(A,B)
assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A)
assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B)
assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))
assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))
assert_(all(diag(BB) >= 0))
assert_(all(diag(BB).imag == 0))
def test_qz_complex64(self):
n = 5
A = (random([n,n]) + 1j*random([n,n])).astype(complex64)
B = (random([n,n]) + 1j*random([n,n])).astype(complex64)
AA,BB,Q,Z = qz(A,B)
assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A, decimal=5)
assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B, decimal=5)
assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n), decimal=5)
assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n), decimal=5)
assert_(all(diag(BB) >= 0))
assert_(all(diag(BB).imag == 0))
def test_qz_double_complex(self):
n = 5
A = random([n,n])
B = random([n,n])
AA,BB,Q,Z = qz(A,B, output='complex')
aa = dot(dot(Q,AA),Z.conjugate().T)
assert_array_almost_equal(aa.real, A)
assert_array_almost_equal(aa.imag, 0)
bb = dot(dot(Q,BB),Z.conjugate().T)
assert_array_almost_equal(bb.real, B)
assert_array_almost_equal(bb.imag, 0)
assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))
assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))
assert_(all(diag(BB) >= 0))
def test_qz_double_sort(self):
# from http://www.nag.com/lapack-ex/node119.html
# NOTE: These matrices may be ill-conditioned and lead to a
# seg fault on certain python versions when compiled with
# sse2 or sse3 older ATLAS/LAPACK binaries for windows
# A = np.array([[3.9, 12.5, -34.5, -0.5],
# [ 4.3, 21.5, -47.5, 7.5],
# [ 4.3, 21.5, -43.5, 3.5],
# [ 4.4, 26.0, -46.0, 6.0 ]])
# B = np.array([[ 1.0, 2.0, -3.0, 1.0],
# [1.0, 3.0, -5.0, 4.0],
# [1.0, 3.0, -4.0, 3.0],
# [1.0, 3.0, -4.0, 4.0]])
A = np.array([[3.9, 12.5, -34.5, 2.5],
[4.3, 21.5, -47.5, 7.5],
[4.3, 1.5, -43.5, 3.5],
[4.4, 6.0, -46.0, 6.0]])
B = np.array([[1.0, 1.0, -3.0, 1.0],
[1.0, 3.0, -5.0, 4.4],
[1.0, 2.0, -4.0, 1.0],
[1.2, 3.0, -4.0, 4.0]])
sort = lambda ar,ai,beta: ai == 0
assert_raises(ValueError, qz, A, B, sort=sort)
if False:
AA,BB,Q,Z,sdim = qz(A,B,sort=sort)
# assert_(sdim == 2)
assert_(sdim == 4)
assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
# test absolute values bc the sign is ambiguous and might be platform
# dependent
assert_array_almost_equal(np.abs(AA), np.abs(np.array(
[[35.7864, -80.9061, -12.0629, -9.498],
[0., 2.7638, -2.3505, 7.3256],
[0., 0., 0.6258, -0.0398],
[0., 0., 0., -12.8217]])), 4)
assert_array_almost_equal(np.abs(BB), np.abs(np.array(
[[4.5324, -8.7878, 3.2357, -3.5526],
[0., 1.4314, -2.1894, 0.9709],
[0., 0., 1.3126, -0.3468],
[0., 0., 0., 0.559]])), 4)
assert_array_almost_equal(np.abs(Q), np.abs(np.array(
[[-0.4193, -0.605, -0.1894, -0.6498],
[-0.5495, 0.6987, 0.2654, -0.3734],
[-0.4973, -0.3682, 0.6194, 0.4832],
[-0.5243, 0.1008, -0.7142, 0.4526]])), 4)
assert_array_almost_equal(np.abs(Z), np.abs(np.array(
[[-0.9471, -0.2971, -0.1217, 0.0055],
[-0.0367, 0.1209, 0.0358, 0.9913],
[0.3171, -0.9041, -0.2547, 0.1312],
[0.0346, 0.2824, -0.9587, 0.0014]])), 4)
# test absolute values bc the sign is ambiguous and might be platform
# dependent
# assert_array_almost_equal(abs(AA), abs(np.array([
# [3.8009, -69.4505, 50.3135, -43.2884],
# [0.0000, 9.2033, -0.2001, 5.9881],
# [0.0000, 0.0000, 1.4279, 4.4453],
# [0.0000, 0.0000, 0.9019, -1.1962]])), 4)
# assert_array_almost_equal(abs(BB), abs(np.array([
# [1.9005, -10.2285, 0.8658, -5.2134],
# [0.0000, 2.3008, 0.7915, 0.4262],
# [0.0000, 0.0000, 0.8101, 0.0000],
# [0.0000, 0.0000, 0.0000, -0.2823]])), 4)
# assert_array_almost_equal(abs(Q), abs(np.array([
# [0.4642, 0.7886, 0.2915, -0.2786],
# [0.5002, -0.5986, 0.5638, -0.2713],
# [0.5002, 0.0154, -0.0107, 0.8657],
# [0.5331, -0.1395, -0.7727, -0.3151]])), 4)
# assert_array_almost_equal(dot(Q,Q.T), eye(4))
# assert_array_almost_equal(abs(Z), abs(np.array([
# [0.9961, -0.0014, 0.0887, -0.0026],
# [0.0057, -0.0404, -0.0938, -0.9948],
# [0.0626, 0.7194, -0.6908, 0.0363],
# [0.0626, -0.6934, -0.7114, 0.0956]])), 4)
# assert_array_almost_equal(dot(Z,Z.T), eye(4))
# def test_qz_complex_sort(self):
# cA = np.array([
# [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j],
# [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],
# [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],
# [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])
# cB = np.array([
# [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j],
# [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j],
# [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],
# [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])
# AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')
# eigenvalues = diag(AAS)/diag(BBS)
# assert_(all(np.real(eigenvalues[:sdim] < 0)))
# assert_(all(np.real(eigenvalues[sdim:] > 0)))
def test_check_finite(self):
n = 5
A = random([n,n])
B = random([n,n])
AA,BB,Q,Z = qz(A,B,check_finite=False)
assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
assert_array_almost_equal(dot(Q,Q.T), eye(n))
assert_array_almost_equal(dot(Z,Z.T), eye(n))
assert_(all(diag(BB) >= 0))
def _make_pos(X):
# the decompositions can have different signs than verified results
return np.sign(X)*X
class TestOrdQZ(object):
@classmethod
def setup_class(cls):
# http://www.nag.com/lapack-ex/node119.html
A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j,
7.5 + 0.5j],
[-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j,
-10.5 - 1.5j],
[4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j,
-7.5 - 3.5j],
[5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j,
-19.0 - 32.5j]])
B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j],
[0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j],
[1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j],
[0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]])
# http://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml
A2 = np.array([[3.9, 12.5, -34.5, -0.5],
[4.3, 21.5, -47.5, 7.5],
[4.3, 21.5, -43.5, 3.5],
[4.4, 26.0, -46.0, 6.0]])
B2 = np.array([[1, 2, -3, 1],
[1, 3, -5, 4],
[1, 3, -4, 3],
[1, 3, -4, 4]])
# example with the eigenvalues
# -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j,
# 0.61244091
# thus featuring:
# * one complex conjugate eigenvalue pair,
# * one eigenvalue in the lhp
# * 2 eigenvalues in the unit circle
# * 2 non-real eigenvalues
A3 = np.array([[5., 1., 3., 3.],
[4., 4., 2., 7.],
[7., 4., 1., 3.],
[0., 4., 8., 7.]])
B3 = np.array([[8., 10., 6., 10.],
[7., 7., 2., 9.],
[9., 1., 6., 6.],
[5., 1., 4., 7.]])
# example with infinite eigenvalues
A4 = np.eye(2)
B4 = np.diag([0, 1])
# example with (alpha, beta) = (0, 0)
A5 = np.diag([1, 0])
B5 = np.diag([1, 0])
cls.A = [A1, A2, A3, A4, A5]
cls.B = [B1, B2, B3, B4, A5]
def qz_decomp(self, sort):
try:
olderr = np.seterr('raise')
ret = [ordqz(Ai, Bi, sort=sort) for Ai, Bi in zip(self.A, self.B)]
finally:
np.seterr(**olderr)
return tuple(ret)
def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z):
Id = np.eye(*A.shape)
# make sure Q and Z are orthogonal
assert_array_almost_equal(Q.dot(Q.T.conj()), Id)
assert_array_almost_equal(Z.dot(Z.T.conj()), Id)
# check factorization
assert_array_almost_equal(Q.dot(AA), A.dot(Z))
assert_array_almost_equal(Q.dot(BB), B.dot(Z))
# check shape of AA and BB
assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape))
assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape))
# check eigenvalues
for i in range(A.shape[0]):
# does the current diagonal element belong to a 2-by-2 block
# that was already checked?
if i > 0 and A[i, i - 1] != 0:
continue
# take care of 2-by-2 blocks
if i < AA.shape[0] - 1 and AA[i + 1, i] != 0:
evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2])
# make sure the pair of complex conjugate eigenvalues
# is ordered consistently (positive imaginary part first)
if evals[0].imag < 0:
evals = evals[[1, 0]]
tmp = alpha[i:i + 2]/beta[i:i + 2]
if tmp[0].imag < 0:
tmp = tmp[[1, 0]]
assert_array_almost_equal(evals, tmp)
else:
if alpha[i] == 0 and beta[i] == 0:
assert_equal(AA[i, i], 0)
assert_equal(BB[i, i], 0)
elif beta[i] == 0:
assert_equal(BB[i, i], 0)
else:
assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i])
sortfun = _select_function(sort)
lastsort = True
for i in range(A.shape[0]):
cursort = sortfun(np.array([alpha[i]]), np.array([beta[i]]))
# once the sorting criterion was not matched all subsequent
# eigenvalues also shouldn't match
if not lastsort:
assert(not cursort)
lastsort = cursort
def check_all(self, sort):
ret = self.qz_decomp(sort)
for reti, Ai, Bi in zip(ret, self.A, self.B):
self.check(Ai, Bi, sort, *reti)
def test_lhp(self):
self.check_all('lhp')
def test_rhp(self):
self.check_all('rhp')
def test_iuc(self):
self.check_all('iuc')
def test_ouc(self):
self.check_all('ouc')
def test_ref(self):
# real eigenvalues first (top-left corner)
def sort(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
out[~nonzero] = False
out[nonzero] = (x[nonzero]/y[nonzero]).imag == 0
return out
self.check_all(sort)
def test_cef(self):
# complex eigenvalues first (top-left corner)
def sort(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
out[~nonzero] = False
out[nonzero] = (x[nonzero]/y[nonzero]).imag != 0
return out
self.check_all(sort)
def test_diff_input_types(self):
ret = ordqz(self.A[1], self.B[2], sort='lhp')
self.check(self.A[1], self.B[2], 'lhp', *ret)
ret = ordqz(self.B[2], self.A[1], sort='lhp')
self.check(self.B[2], self.A[1], 'lhp', *ret)
def test_sort_explicit(self):
# Test order of the eigenvalues in the 2 x 2 case where we can
# explicitly compute the solution
A1 = np.eye(2)
B1 = np.diag([-2, 0.5])
expected1 = [('lhp', [-0.5, 2]),
('rhp', [2, -0.5]),
('iuc', [-0.5, 2]),
('ouc', [2, -0.5])]
A2 = np.eye(2)
B2 = np.diag([-2 + 1j, 0.5 + 0.5j])
expected2 = [('lhp', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
('rhp', [1/(0.5 + 0.5j), 1/(-2 + 1j)]),
('iuc', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
('ouc', [1/(0.5 + 0.5j), 1/(-2 + 1j)])]
# 'lhp' is ambiguous so don't test it
A3 = np.eye(2)
B3 = np.diag([2, 0])
expected3 = [('rhp', [0.5, np.inf]),
('iuc', [0.5, np.inf]),
('ouc', [np.inf, 0.5])]
# 'rhp' is ambiguous so don't test it
A4 = np.eye(2)
B4 = np.diag([-2, 0])
expected4 = [('lhp', [-0.5, np.inf]),
('iuc', [-0.5, np.inf]),
('ouc', [np.inf, -0.5])]
A5 = np.diag([0, 1])
B5 = np.diag([0, 0.5])
# 'lhp' and 'iuc' are ambiguous so don't test them
expected5 = [('rhp', [2, np.nan]),
('ouc', [2, np.nan])]
A = [A1, A2, A3, A4, A5]
B = [B1, B2, B3, B4, B5]
expected = [expected1, expected2, expected3, expected4, expected5]
for Ai, Bi, expectedi in zip(A, B, expected):
for sortstr, expected_eigvals in expectedi:
_, _, alpha, beta, _, _ = ordqz(Ai, Bi, sort=sortstr)
azero = (alpha == 0)
bzero = (beta == 0)
x = np.empty_like(alpha)
x[azero & bzero] = np.nan
x[~azero & bzero] = np.inf
x[~bzero] = alpha[~bzero]/beta[~bzero]
assert_allclose(expected_eigvals, x)
class TestOrdQZWorkspaceSize(object):
def setup_method(self):
seed(12345)
def test_decompose(self):
N = 202
# raises error if lwork parameter to dtrsen is too small
for ddtype in [np.float32, np.float64]:
A = random((N,N)).astype(ddtype)
B = random((N,N)).astype(ddtype)
# sort = lambda alphar, alphai, beta: alphar**2 + alphai**2< beta**2
sort = lambda alpha, beta: alpha < beta
[S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='real')
for ddtype in [np.complex, np.complex64]:
A = random((N,N)).astype(ddtype)
B = random((N,N)).astype(ddtype)
sort = lambda alpha, beta: alpha < beta
[S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='complex')
@pytest.mark.slow
def test_decompose_ouc(self):
N = 202
# segfaults if lwork parameter to dtrsen is too small
for ddtype in [np.float32, np.float64, np.complex, np.complex64]:
A = random((N,N)).astype(ddtype)
B = random((N,N)).astype(ddtype)
[S,T,alpha,beta,U,V] = ordqz(A,B,sort='ouc')
class TestDatacopied(object):
def test_datacopied(self):
from scipy.linalg.decomp import _datacopied
M = matrix([[0,1],[2,3]])
A = asarray(M)
L = M.tolist()
M2 = M.copy()
class Fake1:
def __array__(self):
return A
class Fake2:
__array_interface__ = A.__array_interface__
F1 = Fake1()
F2 = Fake2()
for item, status in [(M, False), (A, False), (L, True),
(M2, False), (F1, False), (F2, False)]:
arr = asarray(item)
assert_equal(_datacopied(arr, item), status,
err_msg=repr(item))
def test_aligned_mem_float():
"""Check linalg works with non-aligned memory"""
# Allocate 402 bytes of memory (allocated on boundary)
a = arange(402, dtype=np.uint8)
# Create an array with boundary offset 4
z = np.frombuffer(a.data, offset=2, count=100, dtype=float32)
z.shape = 10, 10
eig(z, overwrite_a=True)
eig(z.T, overwrite_a=True)
def test_aligned_mem():
"""Check linalg works with non-aligned memory"""
# Allocate 804 bytes of memory (allocated on boundary)
a = arange(804, dtype=np.uint8)
# Create an array with boundary offset 4
z = np.frombuffer(a.data, offset=4, count=100, dtype=float)
z.shape = 10, 10
eig(z, overwrite_a=True)
eig(z.T, overwrite_a=True)
def test_aligned_mem_complex():
"""Check that complex objects don't need to be completely aligned"""
# Allocate 1608 bytes of memory (allocated on boundary)
a = zeros(1608, dtype=np.uint8)
# Create an array with boundary offset 8
z = np.frombuffer(a.data, offset=8, count=100, dtype=complex)
z.shape = 10, 10
eig(z, overwrite_a=True)
# This does not need special handling
eig(z.T, overwrite_a=True)
def check_lapack_misaligned(func, args, kwargs):
args = list(args)
for i in range(len(args)):
a = args[:]
if isinstance(a[i],np.ndarray):
# Try misaligning a[i]
aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8)
aa = np.frombuffer(aa.data, offset=4, count=a[i].size, dtype=a[i].dtype)
aa.shape = a[i].shape
aa[...] = a[i]
a[i] = aa
func(*a,**kwargs)
if len(a[i].shape) > 1:
a[i] = a[i].T
func(*a,**kwargs)
@pytest.mark.xfail(run=False, reason="Ticket #1152, triggers a segfault in rare cases.")
def test_lapack_misaligned():
M = np.eye(10,dtype=float)
R = np.arange(100)
R.shape = 10,10
S = np.arange(20000,dtype=np.uint8)
S = np.frombuffer(S.data, offset=4, count=100, dtype=float)
S.shape = 10, 10
b = np.ones(10)
LU, piv = lu_factor(S)
for (func, args, kwargs) in [
(eig,(S,),dict(overwrite_a=True)), # crash
(eigvals,(S,),dict(overwrite_a=True)), # no crash
(lu,(S,),dict(overwrite_a=True)), # no crash
(lu_factor,(S,),dict(overwrite_a=True)), # no crash
(lu_solve,((LU,piv),b),dict(overwrite_b=True)),
(solve,(S,b),dict(overwrite_a=True,overwrite_b=True)),
(svd,(M,),dict(overwrite_a=True)), # no crash
(svd,(R,),dict(overwrite_a=True)), # no crash
(svd,(S,),dict(overwrite_a=True)), # crash
(svdvals,(S,),dict()), # no crash
(svdvals,(S,),dict(overwrite_a=True)), # crash
(cholesky,(M,),dict(overwrite_a=True)), # no crash
(qr,(S,),dict(overwrite_a=True)), # crash
(rq,(S,),dict(overwrite_a=True)), # crash
(hessenberg,(S,),dict(overwrite_a=True)), # crash
(schur,(S,),dict(overwrite_a=True)), # crash
]:
check_lapack_misaligned(func, args, kwargs)
# not properly tested
# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd
class TestOverwrite(object):
def test_eig(self):
assert_no_overwrite(eig, [(3,3)])
assert_no_overwrite(eig, [(3,3), (3,3)])
def test_eigh(self):
assert_no_overwrite(eigh, [(3,3)])
assert_no_overwrite(eigh, [(3,3), (3,3)])
def test_eig_banded(self):
assert_no_overwrite(eig_banded, [(3,2)])
def test_eigvals(self):
assert_no_overwrite(eigvals, [(3,3)])
def test_eigvalsh(self):
assert_no_overwrite(eigvalsh, [(3,3)])
def test_eigvals_banded(self):
assert_no_overwrite(eigvals_banded, [(3,2)])
def test_hessenberg(self):
assert_no_overwrite(hessenberg, [(3,3)])
def test_lu_factor(self):
assert_no_overwrite(lu_factor, [(3,3)])
def test_lu_solve(self):
x = np.array([[1,2,3], [4,5,6], [7,8,8]])
xlu = lu_factor(x)
assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])
def test_lu(self):
assert_no_overwrite(lu, [(3,3)])
def test_qr(self):
assert_no_overwrite(qr, [(3,3)])
def test_rq(self):
assert_no_overwrite(rq, [(3,3)])
def test_schur(self):
assert_no_overwrite(schur, [(3,3)])
def test_schur_complex(self):
assert_no_overwrite(lambda a: schur(a, 'complex'), [(3,3)],
dtypes=[np.float32, np.float64])
def test_svd(self):
assert_no_overwrite(svd, [(3,3)])
assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3,3)])
def test_svdvals(self):
assert_no_overwrite(svdvals, [(3,3)])
def _check_orth(n, dtype, skip_big=False):
X = np.ones((n, 2), dtype=float).astype(dtype)
eps = np.finfo(dtype).eps
tol = 1000 * eps
Y = orth(X)
assert_equal(Y.shape, (n, 1))
assert_allclose(Y, Y.mean(), atol=tol)
Y = orth(X.T)
assert_equal(Y.shape, (2, 1))
assert_allclose(Y, Y.mean(), atol=tol)
if n > 5 and not skip_big:
np.random.seed(1)
X = np.random.rand(n, 5).dot(np.random.rand(5, n))
X = X + 1e-4 * np.random.rand(n, 1).dot(np.random.rand(1, n))
X = X.astype(dtype)
Y = orth(X, rcond=1e-3)
assert_equal(Y.shape, (n, 5))
Y = orth(X, rcond=1e-6)
assert_equal(Y.shape, (n, 5 + 1))
@pytest.mark.slow
@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8, reason="test only on 64-bit, else too slow")
def test_orth_memory_efficiency():
# Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable.
# Keep in mind that @pytest.mark.slow tests are likely to be running
# under configurations that support 4Gb+ memory for tests related to
# 32 bit overflow.
n = 10*1000*1000
try:
_check_orth(n, np.float64, skip_big=True)
except MemoryError:
raise AssertionError('memory error perhaps caused by orth regression')
def test_orth():
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
sizes = [1, 2, 3, 10, 100]
for dt, n in itertools.product(dtypes, sizes):
_check_orth(n, dt)
def test_null_space():
np.random.seed(1)
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
sizes = [1, 2, 3, 10, 100]
for dt, n in itertools.product(dtypes, sizes):
X = np.ones((2, n), dtype=dt)
eps = np.finfo(dt).eps
tol = 1000 * eps
Y = null_space(X)
assert_equal(Y.shape, (n, n-1))
assert_allclose(X.dot(Y), 0, atol=tol)
Y = null_space(X.T)
assert_equal(Y.shape, (2, 1))
assert_allclose(X.T.dot(Y), 0, atol=tol)
X = np.random.randn(1 + n//2, n)
Y = null_space(X)
assert_equal(Y.shape, (n, n - 1 - n//2))
assert_allclose(X.dot(Y), 0, atol=tol)
if n > 5:
np.random.seed(1)
X = np.random.rand(n, 5).dot(np.random.rand(5, n))
X = X + 1e-4 * np.random.rand(n, 1).dot(np.random.rand(1, n))
X = X.astype(dt)
Y = null_space(X, rcond=1e-3)
assert_equal(Y.shape, (n, n - 5))
Y = null_space(X, rcond=1e-6)
assert_equal(Y.shape, (n, n - 6))
def test_subspace_angles():
H = hadamard(8, float)
A = H[:, :3]
B = H[:, 3:]
assert_allclose(subspace_angles(A, B), [np.pi / 2.] * 3, atol=1e-14)
assert_allclose(subspace_angles(B, A), [np.pi / 2.] * 3, atol=1e-14)
for x in (A, B):
assert_allclose(subspace_angles(x, x), np.zeros(x.shape[1]),
atol=1e-14)
# From MATLAB function "subspace", which effectively only returns the
# last value that we calculate
x = np.array(
[[0.537667139546100, 0.318765239858981, 3.578396939725760, 0.725404224946106], # noqa: E501
[1.833885014595086, -1.307688296305273, 2.769437029884877, -0.063054873189656], # noqa: E501
[-2.258846861003648, -0.433592022305684, -1.349886940156521, 0.714742903826096], # noqa: E501
[0.862173320368121, 0.342624466538650, 3.034923466331855, -0.204966058299775]]) # noqa: E501
expected = 1.481454682101605
assert_allclose(subspace_angles(x[:, :2], x[:, 2:])[0], expected,
rtol=1e-12)
assert_allclose(subspace_angles(x[:, 2:], x[:, :2])[0], expected,
rtol=1e-12)
expected = 0.746361174247302
assert_allclose(subspace_angles(x[:, :2], x[:, [2]]), expected, rtol=1e-12)
assert_allclose(subspace_angles(x[:, [2]], x[:, :2]), expected, rtol=1e-12)
expected = 0.487163718534313
assert_allclose(subspace_angles(x[:, :3], x[:, [3]]), expected, rtol=1e-12)
assert_allclose(subspace_angles(x[:, [3]], x[:, :3]), expected, rtol=1e-12)
expected = 0.328950515907756
assert_allclose(subspace_angles(x[:, :2], x[:, 1:]), [expected, 0],
atol=1e-12)
# Degenerate conditions
assert_raises(ValueError, subspace_angles, x[0], x)
assert_raises(ValueError, subspace_angles, x, x[0])
assert_raises(ValueError, subspace_angles, x[:-1], x)
class TestCDF2RDF(object):
def matmul(self, a, b):
return np.einsum('...ij,...jk->...ik', a, b)
def assert_eig_valid(self, w, v, x):
assert_array_almost_equal(
self.matmul(v, w),
self.matmul(x, v)
)
def test_single_array0x0real(self):
# eig doesn't support 0x0 in old versions of numpy
X = np.empty((0, 0))
w, v = np.empty(0), np.empty((0, 0))
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_single_array2x2_real(self):
X = np.array([[1, 2], [3, -1]])
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_single_array2x2_complex(self):
X = np.array([[1, 2], [-2, 1]])
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_single_array3x3_real(self):
X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_single_array3x3_complex(self):
X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_random_1d_stacked_arrays(self):
# cannot test M == 0 due to bug in old numpy
for M in range(1, 7):
X = np.random.rand(100, M, M)
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_random_2d_stacked_arrays(self):
# cannot test M == 0 due to bug in old numpy
for M in range(1, 7):
X = np.random.rand(10, 10, M, M)
w, v = np.linalg.eig(X)
wr, vr = cdf2rdf(w, v)
self.assert_eig_valid(wr, vr, X)
def test_low_dimensionality_error(self):
w, v = np.empty(()), np.array((2,))
assert_raises(ValueError, cdf2rdf, w, v)
def test_not_square_error(self):
# Check that passing a non-square array raises a ValueError.
w, v = np.arange(3), np.arange(6).reshape(3,2)
assert_raises(ValueError, cdf2rdf, w, v)
def test_swapped_v_w_error(self):
# Check that exchanging places of w and v raises ValueError.
X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
w, v = np.linalg.eig(X)
assert_raises(ValueError, cdf2rdf, v, w)
def test_non_associated_error(self):
# Check that passing non-associated eigenvectors raises a ValueError.
w, v = np.arange(3), np.arange(16).reshape(4,4)
assert_raises(ValueError, cdf2rdf, w, v)
def test_not_conjugate_pairs(self):
# Check that passing non-conjugate pairs raises a ValueError.
X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]])
w, v = np.linalg.eig(X)
assert_raises(ValueError, cdf2rdf, w, v)
# different arrays in the stack, so not conjugate
X = np.array([
[[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]],
[[1, 2, 3], [1, 2, 3], [2, 5, 6-1j]],
])
w, v = np.linalg.eig(X)
assert_raises(ValueError, cdf2rdf, w, v)
|
import os
import re
def listMetrics(storage_dir, metric_suffix='wsp'):
metric_regex = re.compile(".*\.%s$" % metric_suffix)
for root, dirnames, filenames in os.walk(storage_dir):
for filename in filenames:
if metric_regex.match(filename):
root_path = root[len(storage_dir) + 1:]
m_path = os.path.join(root_path, filename)
m_name, m_ext = os.path.splitext(m_path)
m_name = m_name.replace('/', '.')
yield m_name
Strip dir separators that might be dangling
import os
import re
def listMetrics(storage_dir, metric_suffix='wsp'):
metric_regex = re.compile(".*\.%s$" % metric_suffix)
storage_dir = storage_dir.rstrip(os.sep)
for root, dirnames, filenames in os.walk(storage_dir):
for filename in filenames:
if metric_regex.match(filename):
root_path = root[len(storage_dir) + 1:]
m_path = os.path.join(root_path, filename)
m_name, m_ext = os.path.splitext(m_path)
m_name = m_name.replace('/', '.')
yield m_name
|
import copy
import errno
import os
from invoke.config import Config as InvokeConfig, merge_dicts
from paramiko.config import SSHConfig
from .util import get_local_user, debug
class Config(InvokeConfig):
"""
An `invoke.config.Config` subclass with extra Fabric-related behavior.
This class behaves like `invoke.config.Config` in every way, with the
following exceptions:
- its `global_defaults` staticmethod has been extended to add/modify some
default settings (see its documentation, below, for details);
- it accepts additional instantiation arguments related to loading
``ssh_config`` files.
- it triggers loading of Fabric-specific env vars (e.g.
``FABRIC_RUN_HIDE=true`` instead of ``INVOKE_RUN_HIDE=true``) and
filenames (e.g. ``/etc/fabric.yaml`` instead of ``/etc/invoke.yaml``).
Intended for use with `.Connection`, as using vanilla
`invoke.config.Config` objects would require users to manually define
``port``, ``user`` and so forth.
.. seealso:: :doc:`/concepts/configuration`, :ref:`ssh-config`
"""
prefix = 'fabric'
def __init__(self, *args, **kwargs):
"""
Creates a new Fabric-specific config object.
For most API details, see `invoke.config.Config.__init__`. Parameters
new to this subclass are listed below.
:param ssh_config:
Custom/explicit `paramiko.config.SSHConfig` object. If given,
prevents loading of any SSH config files. Default: ``None``.
:param str runtime_ssh_path:
Runtime SSH config path to load. Prevents loading of system/user
files if given. Default: ``None``.
:param str system_ssh_path:
Location of the system-level SSH config file. Default:
``/etc/ssh/ssh_config``.
:param str user_ssh_path:
Location of the user-level SSH config file. Default:
``~/.ssh/config``.
"""
# Tease out our own kwargs.
# TODO: consider moving more stuff out of __init__ and into methods so
# there's less of this sort of splat-args + pop thing? Eh.
ssh_config = kwargs.pop('ssh_config', None)
self._set(_runtime_ssh_path=kwargs.pop('runtime_ssh_path', None))
system_path = kwargs.pop('system_ssh_path', '/etc/ssh/ssh_config')
self._set(_system_ssh_path=system_path)
self._set(_user_ssh_path=kwargs.pop('user_ssh_path', '~/.ssh/config'))
# Record whether we were given an explicit object (so other steps know
# whether to bother loading from disk or not)
# This needs doing before super __init__ as that calls our post_init
explicit = ssh_config is not None
self._set(_given_explicit_object=explicit)
# Arrive at some non-None SSHConfig object (upon which to run .parse()
# later, in _load_ssh_file())
if ssh_config is None:
ssh_config = SSHConfig()
self._set(base_ssh_config=ssh_config)
# Now that our own attributes have been prepared, we can fall up into
# parent __init__(), which will trigger post_init() (which needs the
# attributes we just set up)
super(Config, self).__init__(*args, **kwargs)
def post_init(self):
super(Config, self).post_init()
# Now that regular config is loaded, we can update the runtime SSH
# config path
if self.ssh_config_path:
self._runtime_ssh_path = self.ssh_config_path
# Load files from disk, if necessary
if not self._given_explicit_object:
self.load_ssh_files()
def clone(self, *args, **kwargs):
# TODO: clone() at this point kinda-sorta feels like it's retreading
# __reduce__ and the related (un)pickling stuff...
# Get cloned obj.
# NOTE: Because we also extend .init_kwargs, the actual core SSHConfig
# data is passed in at init time (ensuring no files get loaded a 2nd,
# etc time) and will already be present, so we don't need to set
# .base_ssh_config ourselves. Similarly, there's no need to worry about
# how the SSH config paths may be inaccurate until below; nothing will
# be referencing them.
new = super(Config, self).clone(*args, **kwargs)
# Copy over our custom attributes, so that the clone still resembles us
# re: recording where the data originally came from (in case anything
# re-runs .load_ssh_files(), for example).
for attr in (
'_runtime_ssh_path',
'_system_ssh_path',
'_user_ssh_path',
):
setattr(new, attr, getattr(self, attr))
# All done
return new
def _clone_init_kwargs(self, *args, **kw):
# Parent kwargs
kwargs = super(Config, self)._clone_init_kwargs(*args, **kw)
# Transmit our internal SSHConfig via explicit-obj kwarg, thus
# bypassing any file loading. (Our extension of clone() above copies
# over other attributes as well so that the end result looks consistent
# with reality.)
new_config = SSHConfig()
# TODO: as with other spots, this implies SSHConfig needs a cleaner
# public API re: creating and updating its core data.
new_config._config = copy.deepcopy(self.base_ssh_config._config)
return dict(
kwargs,
ssh_config=new_config,
)
def load_ssh_files(self):
"""
Trigger loading of configured SSH config file paths.
Expects that ``base_ssh_config`` has already been set to an
`~paramiko.config.SSHConfig` object.
:returns: ``None``.
"""
if self._runtime_ssh_path is not None:
path = self._runtime_ssh_path
# Manually blow up like open() (_load_ssh_file normally doesn't)
if not os.path.exists(path):
msg = "No such file or directory: {0!r}".format(path)
raise IOError(errno.ENOENT, msg)
self._load_ssh_file(os.path.expanduser(path))
elif self.load_ssh_configs:
for path in (self._user_ssh_path, self._system_ssh_path):
self._load_ssh_file(os.path.expanduser(path))
def _load_ssh_file(self, path):
"""
Attempt to open and parse an SSH config file at ``path``.
Does nothing if ``path`` is not a path to a valid file.
:returns: ``None``.
"""
if os.path.isfile(path):
old_rules = len(self.base_ssh_config._config)
with open(path) as fd:
self.base_ssh_config.parse(fd)
new_rules = len(self.base_ssh_config._config)
msg = "Loaded {0} new ssh_config rules from {1!r}"
debug(msg.format(new_rules - old_rules, path))
else:
debug("File not found, skipping")
@staticmethod
def global_defaults():
"""
Default configuration values and behavior toggles.
Fabric only extends this method in order to make minor adjustments and
additions to Invoke's `~invoke.config.Config.global_defaults`; see its
documentation for the base values, such as the config subtrees
controlling behavior of ``run`` or how ``tasks`` behave.
For Fabric-specific modifications and additions to the Invoke-level
defaults, see our own config docs at :ref:`default-values`.
"""
# TODO: is it worth moving all of our 'new' settings to a discrete
# namespace for cleanliness' sake? e.g. ssh.port, ssh.user etc.
# It wouldn't actually simplify this code any, but it would make it
# easier for users to determine what came from which library/repo.
defaults = InvokeConfig.global_defaults()
ours = {
# New settings
'port': 22,
'user': get_local_user(),
'forward_agent': False,
'gateway': None,
'load_ssh_configs': True,
'connect_kwargs': {},
# TODO: this becomes an override once Invoke grows execution
# timeouts (which should be timeouts.execute)
'timeouts': {
'connect': None,
},
'ssh_config_path': None,
# Overrides of existing settings
'run': {
'replace_env': True,
},
}
merge_dicts(defaults, ours)
return defaults
Sort Fabric-level config defaults
import copy
import errno
import os
from invoke.config import Config as InvokeConfig, merge_dicts
from paramiko.config import SSHConfig
from .util import get_local_user, debug
class Config(InvokeConfig):
"""
An `invoke.config.Config` subclass with extra Fabric-related behavior.
This class behaves like `invoke.config.Config` in every way, with the
following exceptions:
- its `global_defaults` staticmethod has been extended to add/modify some
default settings (see its documentation, below, for details);
- it accepts additional instantiation arguments related to loading
``ssh_config`` files.
- it triggers loading of Fabric-specific env vars (e.g.
``FABRIC_RUN_HIDE=true`` instead of ``INVOKE_RUN_HIDE=true``) and
filenames (e.g. ``/etc/fabric.yaml`` instead of ``/etc/invoke.yaml``).
Intended for use with `.Connection`, as using vanilla
`invoke.config.Config` objects would require users to manually define
``port``, ``user`` and so forth.
.. seealso:: :doc:`/concepts/configuration`, :ref:`ssh-config`
"""
prefix = 'fabric'
def __init__(self, *args, **kwargs):
"""
Creates a new Fabric-specific config object.
For most API details, see `invoke.config.Config.__init__`. Parameters
new to this subclass are listed below.
:param ssh_config:
Custom/explicit `paramiko.config.SSHConfig` object. If given,
prevents loading of any SSH config files. Default: ``None``.
:param str runtime_ssh_path:
Runtime SSH config path to load. Prevents loading of system/user
files if given. Default: ``None``.
:param str system_ssh_path:
Location of the system-level SSH config file. Default:
``/etc/ssh/ssh_config``.
:param str user_ssh_path:
Location of the user-level SSH config file. Default:
``~/.ssh/config``.
"""
# Tease out our own kwargs.
# TODO: consider moving more stuff out of __init__ and into methods so
# there's less of this sort of splat-args + pop thing? Eh.
ssh_config = kwargs.pop('ssh_config', None)
self._set(_runtime_ssh_path=kwargs.pop('runtime_ssh_path', None))
system_path = kwargs.pop('system_ssh_path', '/etc/ssh/ssh_config')
self._set(_system_ssh_path=system_path)
self._set(_user_ssh_path=kwargs.pop('user_ssh_path', '~/.ssh/config'))
# Record whether we were given an explicit object (so other steps know
# whether to bother loading from disk or not)
# This needs doing before super __init__ as that calls our post_init
explicit = ssh_config is not None
self._set(_given_explicit_object=explicit)
# Arrive at some non-None SSHConfig object (upon which to run .parse()
# later, in _load_ssh_file())
if ssh_config is None:
ssh_config = SSHConfig()
self._set(base_ssh_config=ssh_config)
# Now that our own attributes have been prepared, we can fall up into
# parent __init__(), which will trigger post_init() (which needs the
# attributes we just set up)
super(Config, self).__init__(*args, **kwargs)
def post_init(self):
super(Config, self).post_init()
# Now that regular config is loaded, we can update the runtime SSH
# config path
if self.ssh_config_path:
self._runtime_ssh_path = self.ssh_config_path
# Load files from disk, if necessary
if not self._given_explicit_object:
self.load_ssh_files()
def clone(self, *args, **kwargs):
# TODO: clone() at this point kinda-sorta feels like it's retreading
# __reduce__ and the related (un)pickling stuff...
# Get cloned obj.
# NOTE: Because we also extend .init_kwargs, the actual core SSHConfig
# data is passed in at init time (ensuring no files get loaded a 2nd,
# etc time) and will already be present, so we don't need to set
# .base_ssh_config ourselves. Similarly, there's no need to worry about
# how the SSH config paths may be inaccurate until below; nothing will
# be referencing them.
new = super(Config, self).clone(*args, **kwargs)
# Copy over our custom attributes, so that the clone still resembles us
# re: recording where the data originally came from (in case anything
# re-runs .load_ssh_files(), for example).
for attr in (
'_runtime_ssh_path',
'_system_ssh_path',
'_user_ssh_path',
):
setattr(new, attr, getattr(self, attr))
# All done
return new
def _clone_init_kwargs(self, *args, **kw):
# Parent kwargs
kwargs = super(Config, self)._clone_init_kwargs(*args, **kw)
# Transmit our internal SSHConfig via explicit-obj kwarg, thus
# bypassing any file loading. (Our extension of clone() above copies
# over other attributes as well so that the end result looks consistent
# with reality.)
new_config = SSHConfig()
# TODO: as with other spots, this implies SSHConfig needs a cleaner
# public API re: creating and updating its core data.
new_config._config = copy.deepcopy(self.base_ssh_config._config)
return dict(
kwargs,
ssh_config=new_config,
)
def load_ssh_files(self):
"""
Trigger loading of configured SSH config file paths.
Expects that ``base_ssh_config`` has already been set to an
`~paramiko.config.SSHConfig` object.
:returns: ``None``.
"""
if self._runtime_ssh_path is not None:
path = self._runtime_ssh_path
# Manually blow up like open() (_load_ssh_file normally doesn't)
if not os.path.exists(path):
msg = "No such file or directory: {0!r}".format(path)
raise IOError(errno.ENOENT, msg)
self._load_ssh_file(os.path.expanduser(path))
elif self.load_ssh_configs:
for path in (self._user_ssh_path, self._system_ssh_path):
self._load_ssh_file(os.path.expanduser(path))
def _load_ssh_file(self, path):
"""
Attempt to open and parse an SSH config file at ``path``.
Does nothing if ``path`` is not a path to a valid file.
:returns: ``None``.
"""
if os.path.isfile(path):
old_rules = len(self.base_ssh_config._config)
with open(path) as fd:
self.base_ssh_config.parse(fd)
new_rules = len(self.base_ssh_config._config)
msg = "Loaded {0} new ssh_config rules from {1!r}"
debug(msg.format(new_rules - old_rules, path))
else:
debug("File not found, skipping")
@staticmethod
def global_defaults():
"""
Default configuration values and behavior toggles.
Fabric only extends this method in order to make minor adjustments and
additions to Invoke's `~invoke.config.Config.global_defaults`; see its
documentation for the base values, such as the config subtrees
controlling behavior of ``run`` or how ``tasks`` behave.
For Fabric-specific modifications and additions to the Invoke-level
defaults, see our own config docs at :ref:`default-values`.
"""
# TODO: is it worth moving all of our 'new' settings to a discrete
# namespace for cleanliness' sake? e.g. ssh.port, ssh.user etc.
# It wouldn't actually simplify this code any, but it would make it
# easier for users to determine what came from which library/repo.
defaults = InvokeConfig.global_defaults()
ours = {
# New settings
'connect_kwargs': {},
'forward_agent': False,
'gateway': None,
'load_ssh_configs': True,
'port': 22,
'run': {
'replace_env': True,
},
'ssh_config_path': None,
# TODO: this becomes an override/extend once Invoke grows execution
# timeouts (which should be timeouts.execute)
'timeouts': {
'connect': None,
},
'user': get_local_user(),
}
merge_dicts(defaults, ours)
return defaults
|
########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import hashlib
from flask import g
from werkzeug.local import LocalProxy
from cloudify import constants
from manager_rest.storage import get_storage_manager, models
@LocalProxy
def current_execution():
return getattr(g, 'current_execution', None)
def set_current_execution(execution):
"""
Sets the current execution, lasts for the lifetime of the request.
"""
g.current_execution = execution
def get_current_execution_by_token(execution_token):
sm = get_storage_manager()
hashed = hashlib.sha256(execution_token.encode('ascii')).hexdigest()
token_filter = {models.Execution.token: hashed}
executions = sm.full_access_list(models.Execution, filters=token_filter)
if len(executions) != 1: # Only one execution should match the token
return None
return executions[0]
def get_execution_token_from_request(request):
return request.headers.get(constants.CLOUDIFY_EXECUTION_TOKEN_HEADER)
RD-2659 Optimize getting current_execution (#3131)
storage-manager is just unnecessary here, although that's not the
most important change.
Join tenant and creator, so that when the code next does
current_execution.tenant, and current_execution.creator (for the
purposes of set_current_tenant/set_current_user), those items
are already there, and don't require another query.
########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import hashlib
from flask import g
from werkzeug.local import LocalProxy
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from cloudify import constants
from manager_rest.storage import models, db
@LocalProxy
def current_execution():
return getattr(g, 'current_execution', None)
def set_current_execution(execution):
"""Sets the current execution, lasts for the lifetime of the request."""
g.current_execution = execution
def get_current_execution_by_token(execution_token):
hashed = hashlib.sha256(execution_token.encode('ascii')).hexdigest()
try:
return (
models.Execution.query
.filter_by(token=hashed)
# tenant and creator are going to be fetched soon, so join them
.options(db.joinedload(models.Execution.tenant))
.options(db.joinedload(models.Execution.creator))
.one() # Only one execution should match the token
)
except (MultipleResultsFound, NoResultFound):
return None
def get_execution_token_from_request(request):
return request.headers.get(constants.CLOUDIFY_EXECUTION_TOKEN_HEADER)
|
# this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp, \
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_
from numpy.testing import assert_equal, assert_almost_equal, \
assert_array_equal, assert_array_almost_equal, assert_approx_equal, \
assert_, rand, dec, TestCase, run_module_suite, assert_allclose, \
assert_raises
from numpy.testing.utils import WarningManager
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk
from scipy.special._testutils import assert_tol_equal, with_special_errors, \
assert_func_equal
class TestCephes(TestCase):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)
assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,
1.33520017e-08, 2.74909967e-08],
rtol=1e-6, atol=0)
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
def test_fdtr(self):
assert_equal(cephes.fdtr(1,1,0),0.0)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1,1,0),1.0)
def test_fdtri(self):
# cephes.fdtri(1,1,0.5) #BUG: gives NaN, should be 1
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes.gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
assert_equal(cephes.log1p(0),0.0)
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
# Values from ACM TOMS 804 (derivate by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def test_ncfdtridfd(self):
cephes.ncfdtridfd(1,0.5,0,1)
def __check_ncfdtridfn(self):
cephes.ncfdtridfn(1,0.5,0,1)
def __check_ncfdtrinc(self):
cephes.ncfdtrinc(1,0.5,0,1)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
cephes.pdtr(0,1)
def test_pdtrc(self):
cephes.pdtrc(0,1)
def test_pdtri(self):
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.simplefilter("ignore", RuntimeWarning)
cephes.pdtri(0.5,0.5)
finally:
warn_ctx.__exit__()
def test_pdtrik(self):
cephes.pdtrik(0.5,1)
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_zeta(self):
cephes.zeta(2,2)
def test_zetac(self):
assert_equal(cephes.zetac(0),-1.5)
def test_wofz(self):
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(TestCase):
def test_airy(self):
# This tests the airy function to ensure 8 place accuracy in computation
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
class TestAssocLaguerre(TestCase):
def test_assoc_laguerre(self):
a1 = special.genlaguerre(11,1)
a2 = special.assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = special.assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly(TestCase):
def test_besselpoly(self):
pass
class TestKelvin(TestCase):
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),11)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),4)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli(TestCase):
def test_bernoulli(self):
brn = special.bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta(TestCase):
def test_beta(self):
bet = special.beta(2,4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = special.betaln(2,4)
bet = log(abs(special.beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = special.betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = special.betaincinv(2,4,.5)
comp = special.betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestTrigonometric(TestCase):
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.cotdg(45), 1.0, 14)
assert_almost_equal(special.cotdg(-45), -1.0, 14)
assert_almost_equal(special.cotdg(90), 0.0, 14)
assert_almost_equal(special.cotdg(-90), 0.0, 14)
assert_almost_equal(special.cotdg(135), -1.0, 14)
assert_almost_equal(special.cotdg(-135), 1.0, 14)
assert_almost_equal(special.cotdg(225), 1.0, 14)
assert_almost_equal(special.cotdg(-225), -1.0, 14)
assert_almost_equal(special.cotdg(270), 0.0, 14)
assert_almost_equal(special.cotdg(-270), 0.0, 14)
assert_almost_equal(special.cotdg(315), -1.0, 14)
assert_almost_equal(special.cotdg(-315), 1.0, 14)
assert_almost_equal(special.cotdg(765), 1.0, 14)
def test_sinc(self):
c = arange(-2,2,.1)
y = special.sinc(c)
yre = sin(pi*c)/(pi*c)
yre[20] = 1.0
assert_array_almost_equal(y, yre, 4)
# Regression test for ticket 1751.
assert_array_almost_equal(special.sinc([0]), 1)
def test_0(self):
x = 0.0
assert_equal(special.sinc(x),1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg(TestCase):
def test_tandg(self):
tn = special.tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = special.tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = special.tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.tandg(0), 0.0, 14)
assert_almost_equal(special.tandg(45), 1.0, 14)
assert_almost_equal(special.tandg(-45), -1.0, 14)
assert_almost_equal(special.tandg(135), -1.0, 14)
assert_almost_equal(special.tandg(-135), 1.0, 14)
assert_almost_equal(special.tandg(180), 0.0, 14)
assert_almost_equal(special.tandg(-180), 0.0, 14)
assert_almost_equal(special.tandg(225), 1.0, 14)
assert_almost_equal(special.tandg(-225), -1.0, 14)
assert_almost_equal(special.tandg(315), -1.0, 14)
assert_almost_equal(special.tandg(-315), 1.0, 14)
class TestEllip(TestCase):
def test_ellipj_nan(self):
"""Regression test for #912."""
special.ellipj(0.5, np.nan)
def test_ellipj(self):
el = special.ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = special.ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
def test_ellipkinc(self):
elkinc = special.ellipkinc(pi/2,.2)
elk = special.ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = special.ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
def test_ellipe(self):
ele = special.ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
def test_ellipeinc(self):
eleinc = special.ellipeinc(pi/2,.2)
ele = special.ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = special.ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
class TestErf(TestCase):
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def _check_variant_func(self, func, other_func, rtol, atol=0):
np.random.seed(1234)
n = 10000
x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
z = x + 1j*y
old_errors = np.seterr(all='ignore')
try:
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
finally:
np.seterr(**old_errors)
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
def test_erfcinv(self):
i = special.erfcinv(1)
assert_equal(i,0)
def test_erfinv(self):
i = special.erfinv(0)
assert_equal(i,0)
def test_errprint(self):
a = special.errprint()
b = 1-a # a is the state 1-a inverts state
c = special.errprint(b) # returns last state 'a'
assert_equal(a,c)
d = special.errprint(a) # returns to original state
assert_equal(d,b) # makes sure state was returned
# assert_equal(d,1-a)
class TestEuler(TestCase):
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
eu2 = special.euler(2) # just checking segfaults
assert_almost_equal(eu0[0],1,8)
assert_almost_equal(eu2[2],-1,8)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
olderr = np.seterr(all='ignore')
try:
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
finally:
np.seterr(**olderr)
assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (special.expm1(2),special.expm1(3),special.expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFresnel(TestCase):
def test_fresnel(self):
frs = array(special.fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = special.fresnel_zeros(5)
assert_array_almost_equal(szo,
array([2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = special.fresnel(szo)[0]
vals2 = special.fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = special.fresnel_zeros(6)
frc = special.fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = special.fresnel_zeros(5)
frs = special.fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainc(self):
gama = special.gammainc(.5,.5)
assert_almost_equal(gama,.7,1)
def test_gammaincnan(self):
gama = special.gammainc(-1,1)
assert_(isnan(gama))
def test_gammainczero(self):
# bad arg but zero integration limit
gama = special.gammainc(-1,0)
assert_equal(gama,0.0)
def test_gammaincc(self):
gicc = special.gammaincc(.5,.5)
greal = 1 - special.gammainc(.5,.5)
assert_almost_equal(gicc,greal,8)
def test_gammainccnan(self):
gama = special.gammaincc(-1,1)
assert_(isnan(gama))
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
@with_special_errors
def test_975(self):
# Regression test for ticket #975 -- switch point in algorithm
# check that things work OK at the point, immediately next floats
# around it, and a bit further away
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for xp in pts:
y = special.gammaincinv(.4, xp)
x = special.gammainc(0.4, y)
assert_tol_equal(x, xp, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_almost_equal(rgam,rlgam,8)
def test_infinity(self):
assert_(np.isinf(special.gamma(-1)))
assert_equal(special.rgamma(-1), 0)
class TestHankel(TestCase):
def test_negv1(self):
assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
def test_hankel1(self):
hank1 = special.hankel1(1,.1)
hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
def test_negv1e(self):
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = special.hankel1e(1,.1)
hankrle = special.hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
def test_negv2(self):
assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
def test_hankel2(self):
hank2 = special.hankel2(1,.1)
hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
def test_neg2e(self):
assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
def test_hankl2e(self):
hank2e = special.hankel2e(1,.1)
hankrl2e = special.hankel2e(1,.1)
assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper(TestCase):
def test_h1vp(self):
h1 = special.h1vp(1,.1)
h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = special.h2vp(1,.1)
h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
# scalar input
assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
# float input, expected values match mpmath
x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
1.37789689539747, 1.60373685288480])
assert_allclose(x, expected, rtol=1e-12)
# complex input
x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
assert_allclose(x, expected.astype(np.complex), rtol=1e-12)
# test broadcasting
x1 = [0.5, 1.5, 2.5]
x2 = [0, 1, 0.5]
x = special.hyp0f1(x1, x2)
expected = [1.0, 1.8134302039235093, 1.21482702689997]
assert_allclose(x, expected, rtol=1e-12)
x = special.hyp0f1(np.row_stack([x1] * 2), x2)
assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
assert_raises(ValueError, special.hyp0f1,
np.row_stack([x1] * 3), [0, 1])
def test_hyp1f1(self):
hyp1 = special.hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# http://projects.scipy.org/scipy/scipy/ticket/659
# reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
# produced with test_hyp1f1.nb
ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = special.hyp1f1(a,b,c)
assert_(abs(expected - result)/expected < 1e-4)
def test_hyp1f2(self):
pass
def test_hyp2f0(self):
pass
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
# negative integer a or b, with c-a-b integer and x > 0.9
[-2,3,1,0.95,0.715],
[2,-3,1,0.95,-0.007],
[-6,3,1,0.95,0.0000810625],
[2,-5,1,0.95,-0.000029375],
# huge negative integers
(10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
(10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
]
for i, (a, b, c, x, v) in enumerate(values):
cv = special.hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_hyp3f0(self):
pass
def test_hyperu(self):
val1 = special.hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = special.hyperu(a,b,z)
hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
(special.gamma(1+a-b)*special.gamma(b)) -
z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
/ (special.gamma(a)*special.gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
def test_hyperu_gh2287(self):
assert_almost_equal(special.hyperu(1, 1.5, 20.2),
0.048360918656699191, 12)
class TestBessel(TestCase):
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
def test_negv_jv(self):
assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
def test_negv_jve(self):
assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_array_almost_equal(jn0,array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = special.jn_zeros(102,5)
assert_tol_equal(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_tol_equal(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_tol_equal(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
return (jn(n-1,x) - jn(n+1,x))/2
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError("Invalid t return for nt=%d" % nt)
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = special.jnp_zeros(443,5)
assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = special.jvp(2,2)
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*rand() - 1
b = 5*rand() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) # this function (kvp) is broken
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
all = r_[yo,zo]
allval = r_[ypo,zpo]
assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
an = special.yn_zeros(443,5)
assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
ao = special.ynp_zeros(43,5)
assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
def test_negv_yv(self):
assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
def test_negv_yve(self):
assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
for v in [-120, -100.3, -20., -10., -1., -.5,
0., 1., 12.49, 120., 301]:
for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
700.6, 1300, 10003]:
yield v, z
# check half-integers; these are problematic points at least
# for cephes/iv
for v in 0.5 + arange(-60, 60):
yield v, 3.5
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_tol_equal(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
olderr = np.seterr(all='ignore')
try:
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
finally:
np.seterr(**olderr)
@dec.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(int)
old_err = np.seterr(all='ignore')
try:
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
finally:
np.seterr(**old_err)
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)
assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*rand()-0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
# Base polynomials come from Abrahmowitz and Stegan
class TestLegendre(TestCase):
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c,[1])
assert_equal(leg1.c,[1,0])
assert_equal(leg2.c,array([3,0,-1])/2.0)
assert_almost_equal(leg3.c,array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c,array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c,array([63,0,-70,0,15,0])/8.0)
class TestLambda(TestCase):
def test_lmbda(self):
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(TestCase):
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions(TestCase):
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]]))
,7)
def test_clpmn_close_to_real(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)])
,7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
assert_almost_equal(special.clpmn(m, n, x+1j*eps)[0][m, n],
special.clpmn(m, n, x-1j*eps)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_equal(lp[1][1,1:], np.inf)
lp = special.lpmn(m, n, z)
assert_equal(lp[1][1,1:], np.inf)
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
olderr = np.seterr(all='ignore')
try:
lp = special.lpmv(-1,-1,.001)
finally:
np.seterr(**olderr)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
lqmnf = special.lqmn(0,2,.5)
lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(TestCase):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = special.mathieu_even_coef(2,5)
# Q not defined broken and cannot figure out proper reporting order
def test_mathieu_odd_coef(self):
pass
# same problem as above
class TestFresnelIntegral(TestCase):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(TestCase):
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(TestCase):
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
def test_pbdv(self):
pbv = special.pbdv(1,.2)
derrl = 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
# Test polygamma(0, x) == psi(x)
x = [2, 3, 1.1e14]
assert_almost_equal(special.polygamma(0, x), special.psi(x))
# Test broadcasting
n = [0, 1, 2]
x = [0.5, 1.5, 2.5]
expected = [-1.9635100260214238, 0.93480220054467933,
-0.23620405164172739]
assert_almost_equal(special.polygamma(n, x), expected)
expected = np.row_stack([expected]*2)
assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
expected)
assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
expected)
class TestProCvSeq(TestCase):
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(TestCase):
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(TestCase):
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(TestCase):
def test_riccati_jn(self):
jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
ricjn = special.riccati_jn(1,.2)
assert_array_almost_equal(ricjn,jnrl,8)
def test_riccati_yn(self):
ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
ricyn = special.riccati_yn(1,.2)
assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
def test_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
yield (assert_array_almost_equal, sh(0,0,0,0),
0.5/sqrt(pi))
yield (assert_array_almost_equal, sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
yield (assert_array_almost_equal, sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
yield (assert_array_almost_equal, sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
class TestSpherical(TestCase):
def test_sph_harm(self):
# see test_sph_harm function
pass
def test_sph_in(self):
i1n = special.sph_in(1,.2)
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_inkn(self):
spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
inkn = r_[special.sph_inkn(1,.2)]
assert_array_almost_equal(inkn,spikn,10)
def test_sph_jn(self):
s1 = special.sph_jn(2,.2)
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_jnyn(self):
jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition
jnyn1 = r_[special.sph_jnyn(1,.2)]
assert_array_almost_equal(jnyn1,jnyn,9)
def test_sph_kn(self):
kn = special.sph_kn(2,.2)
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
def test_sph_yn(self):
sy1 = special.sph_yn(2,.2)[0][2]
sy2 = special.sph_yn(0,.2)[0][0]
sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3 # correct derivative value
assert_almost_equal(sy1,-377.52483,5) # previous values in the system
assert_almost_equal(sy2,-4.9003329,5)
sy3 = special.sph_yn(1,.2)[1][1]
assert_almost_equal(sy3,sphpy,4) # compare correct derivative val. (correct =-system val).
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err), (v, z)
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_chi2c_smalldf():
assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
assert_allclose(special.agm(24, 6), 13.4581714817)
assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28)
def test_legacy():
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.simplefilter("ignore", RuntimeWarning)
# Legacy behavior: truncating arguments to integers
assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))
assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))
assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))
assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
finally:
warn_ctx.__exit__()
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionWarning, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
if __name__ == "__main__":
run_module_suite()
TST: special: fix inf test to be compatible with old Numpy
# this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp, \
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_
from numpy.testing import assert_equal, assert_almost_equal, \
assert_array_equal, assert_array_almost_equal, assert_approx_equal, \
assert_, rand, dec, TestCase, run_module_suite, assert_allclose, \
assert_raises
from numpy.testing.utils import WarningManager
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk
from scipy.special._testutils import assert_tol_equal, with_special_errors, \
assert_func_equal
class TestCephes(TestCase):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)
assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,
1.33520017e-08, 2.74909967e-08],
rtol=1e-6, atol=0)
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
def test_fdtr(self):
assert_equal(cephes.fdtr(1,1,0),0.0)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1,1,0),1.0)
def test_fdtri(self):
# cephes.fdtri(1,1,0.5) #BUG: gives NaN, should be 1
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes.gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
assert_equal(cephes.log1p(0),0.0)
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
# Values from ACM TOMS 804 (derivate by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def test_ncfdtridfd(self):
cephes.ncfdtridfd(1,0.5,0,1)
def __check_ncfdtridfn(self):
cephes.ncfdtridfn(1,0.5,0,1)
def __check_ncfdtrinc(self):
cephes.ncfdtrinc(1,0.5,0,1)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
cephes.pdtr(0,1)
def test_pdtrc(self):
cephes.pdtrc(0,1)
def test_pdtri(self):
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.simplefilter("ignore", RuntimeWarning)
cephes.pdtri(0.5,0.5)
finally:
warn_ctx.__exit__()
def test_pdtrik(self):
cephes.pdtrik(0.5,1)
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_zeta(self):
cephes.zeta(2,2)
def test_zetac(self):
assert_equal(cephes.zetac(0),-1.5)
def test_wofz(self):
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(TestCase):
def test_airy(self):
# This tests the airy function to ensure 8 place accuracy in computation
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
class TestAssocLaguerre(TestCase):
def test_assoc_laguerre(self):
a1 = special.genlaguerre(11,1)
a2 = special.assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = special.assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly(TestCase):
def test_besselpoly(self):
pass
class TestKelvin(TestCase):
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),11)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),4)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli(TestCase):
def test_bernoulli(self):
brn = special.bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta(TestCase):
def test_beta(self):
bet = special.beta(2,4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = special.betaln(2,4)
bet = log(abs(special.beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = special.betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = special.betaincinv(2,4,.5)
comp = special.betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestTrigonometric(TestCase):
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.cotdg(45), 1.0, 14)
assert_almost_equal(special.cotdg(-45), -1.0, 14)
assert_almost_equal(special.cotdg(90), 0.0, 14)
assert_almost_equal(special.cotdg(-90), 0.0, 14)
assert_almost_equal(special.cotdg(135), -1.0, 14)
assert_almost_equal(special.cotdg(-135), 1.0, 14)
assert_almost_equal(special.cotdg(225), 1.0, 14)
assert_almost_equal(special.cotdg(-225), -1.0, 14)
assert_almost_equal(special.cotdg(270), 0.0, 14)
assert_almost_equal(special.cotdg(-270), 0.0, 14)
assert_almost_equal(special.cotdg(315), -1.0, 14)
assert_almost_equal(special.cotdg(-315), 1.0, 14)
assert_almost_equal(special.cotdg(765), 1.0, 14)
def test_sinc(self):
c = arange(-2,2,.1)
y = special.sinc(c)
yre = sin(pi*c)/(pi*c)
yre[20] = 1.0
assert_array_almost_equal(y, yre, 4)
# Regression test for ticket 1751.
assert_array_almost_equal(special.sinc([0]), 1)
def test_0(self):
x = 0.0
assert_equal(special.sinc(x),1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg(TestCase):
def test_tandg(self):
tn = special.tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = special.tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = special.tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.tandg(0), 0.0, 14)
assert_almost_equal(special.tandg(45), 1.0, 14)
assert_almost_equal(special.tandg(-45), -1.0, 14)
assert_almost_equal(special.tandg(135), -1.0, 14)
assert_almost_equal(special.tandg(-135), 1.0, 14)
assert_almost_equal(special.tandg(180), 0.0, 14)
assert_almost_equal(special.tandg(-180), 0.0, 14)
assert_almost_equal(special.tandg(225), 1.0, 14)
assert_almost_equal(special.tandg(-225), -1.0, 14)
assert_almost_equal(special.tandg(315), -1.0, 14)
assert_almost_equal(special.tandg(-315), 1.0, 14)
class TestEllip(TestCase):
def test_ellipj_nan(self):
"""Regression test for #912."""
special.ellipj(0.5, np.nan)
def test_ellipj(self):
el = special.ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = special.ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
def test_ellipkinc(self):
elkinc = special.ellipkinc(pi/2,.2)
elk = special.ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = special.ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
def test_ellipe(self):
ele = special.ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
def test_ellipeinc(self):
eleinc = special.ellipeinc(pi/2,.2)
ele = special.ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = special.ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
class TestErf(TestCase):
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def _check_variant_func(self, func, other_func, rtol, atol=0):
np.random.seed(1234)
n = 10000
x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
z = x + 1j*y
old_errors = np.seterr(all='ignore')
try:
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
finally:
np.seterr(**old_errors)
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
def test_erfcinv(self):
i = special.erfcinv(1)
assert_equal(i,0)
def test_erfinv(self):
i = special.erfinv(0)
assert_equal(i,0)
def test_errprint(self):
a = special.errprint()
b = 1-a # a is the state 1-a inverts state
c = special.errprint(b) # returns last state 'a'
assert_equal(a,c)
d = special.errprint(a) # returns to original state
assert_equal(d,b) # makes sure state was returned
# assert_equal(d,1-a)
class TestEuler(TestCase):
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
eu2 = special.euler(2) # just checking segfaults
assert_almost_equal(eu0[0],1,8)
assert_almost_equal(eu2[2],-1,8)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
olderr = np.seterr(all='ignore')
try:
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
finally:
np.seterr(**olderr)
assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (special.expm1(2),special.expm1(3),special.expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFresnel(TestCase):
def test_fresnel(self):
frs = array(special.fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = special.fresnel_zeros(5)
assert_array_almost_equal(szo,
array([2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = special.fresnel(szo)[0]
vals2 = special.fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = special.fresnel_zeros(6)
frc = special.fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = special.fresnel_zeros(5)
frs = special.fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainc(self):
gama = special.gammainc(.5,.5)
assert_almost_equal(gama,.7,1)
def test_gammaincnan(self):
gama = special.gammainc(-1,1)
assert_(isnan(gama))
def test_gammainczero(self):
# bad arg but zero integration limit
gama = special.gammainc(-1,0)
assert_equal(gama,0.0)
def test_gammaincc(self):
gicc = special.gammaincc(.5,.5)
greal = 1 - special.gammainc(.5,.5)
assert_almost_equal(gicc,greal,8)
def test_gammainccnan(self):
gama = special.gammaincc(-1,1)
assert_(isnan(gama))
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
@with_special_errors
def test_975(self):
# Regression test for ticket #975 -- switch point in algorithm
# check that things work OK at the point, immediately next floats
# around it, and a bit further away
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for xp in pts:
y = special.gammaincinv(.4, xp)
x = special.gammainc(0.4, y)
assert_tol_equal(x, xp, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_almost_equal(rgam,rlgam,8)
def test_infinity(self):
assert_(np.isinf(special.gamma(-1)))
assert_equal(special.rgamma(-1), 0)
class TestHankel(TestCase):
def test_negv1(self):
assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
def test_hankel1(self):
hank1 = special.hankel1(1,.1)
hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
def test_negv1e(self):
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = special.hankel1e(1,.1)
hankrle = special.hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
def test_negv2(self):
assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
def test_hankel2(self):
hank2 = special.hankel2(1,.1)
hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
def test_neg2e(self):
assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
def test_hankl2e(self):
hank2e = special.hankel2e(1,.1)
hankrl2e = special.hankel2e(1,.1)
assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper(TestCase):
def test_h1vp(self):
h1 = special.h1vp(1,.1)
h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = special.h2vp(1,.1)
h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
# scalar input
assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
# float input, expected values match mpmath
x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
1.37789689539747, 1.60373685288480])
assert_allclose(x, expected, rtol=1e-12)
# complex input
x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
assert_allclose(x, expected.astype(np.complex), rtol=1e-12)
# test broadcasting
x1 = [0.5, 1.5, 2.5]
x2 = [0, 1, 0.5]
x = special.hyp0f1(x1, x2)
expected = [1.0, 1.8134302039235093, 1.21482702689997]
assert_allclose(x, expected, rtol=1e-12)
x = special.hyp0f1(np.row_stack([x1] * 2), x2)
assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
assert_raises(ValueError, special.hyp0f1,
np.row_stack([x1] * 3), [0, 1])
def test_hyp1f1(self):
hyp1 = special.hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# http://projects.scipy.org/scipy/scipy/ticket/659
# reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
# produced with test_hyp1f1.nb
ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = special.hyp1f1(a,b,c)
assert_(abs(expected - result)/expected < 1e-4)
def test_hyp1f2(self):
pass
def test_hyp2f0(self):
pass
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
# negative integer a or b, with c-a-b integer and x > 0.9
[-2,3,1,0.95,0.715],
[2,-3,1,0.95,-0.007],
[-6,3,1,0.95,0.0000810625],
[2,-5,1,0.95,-0.000029375],
# huge negative integers
(10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
(10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
]
for i, (a, b, c, x, v) in enumerate(values):
cv = special.hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_hyp3f0(self):
pass
def test_hyperu(self):
val1 = special.hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = special.hyperu(a,b,z)
hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
(special.gamma(1+a-b)*special.gamma(b)) -
z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
/ (special.gamma(a)*special.gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
def test_hyperu_gh2287(self):
assert_almost_equal(special.hyperu(1, 1.5, 20.2),
0.048360918656699191, 12)
class TestBessel(TestCase):
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
def test_negv_jv(self):
assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
def test_negv_jve(self):
assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_array_almost_equal(jn0,array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = special.jn_zeros(102,5)
assert_tol_equal(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_tol_equal(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_tol_equal(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
return (jn(n-1,x) - jn(n+1,x))/2
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError("Invalid t return for nt=%d" % nt)
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = special.jnp_zeros(443,5)
assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = special.jvp(2,2)
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*rand() - 1
b = 5*rand() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) # this function (kvp) is broken
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
all = r_[yo,zo]
allval = r_[ypo,zpo]
assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
an = special.yn_zeros(443,5)
assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
ao = special.ynp_zeros(43,5)
assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
def test_negv_yv(self):
assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
def test_negv_yve(self):
assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
for v in [-120, -100.3, -20., -10., -1., -.5,
0., 1., 12.49, 120., 301]:
for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
700.6, 1300, 10003]:
yield v, z
# check half-integers; these are problematic points at least
# for cephes/iv
for v in 0.5 + arange(-60, 60):
yield v, 3.5
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_tol_equal(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
olderr = np.seterr(all='ignore')
try:
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
finally:
np.seterr(**olderr)
@dec.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(int)
old_err = np.seterr(all='ignore')
try:
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
finally:
np.seterr(**old_err)
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)
assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*rand()-0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
# Base polynomials come from Abrahmowitz and Stegan
class TestLegendre(TestCase):
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c,[1])
assert_equal(leg1.c,[1,0])
assert_equal(leg2.c,array([3,0,-1])/2.0)
assert_almost_equal(leg3.c,array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c,array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c,array([63,0,-70,0,15,0])/8.0)
class TestLambda(TestCase):
def test_lmbda(self):
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(TestCase):
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions(TestCase):
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]]))
,7)
def test_clpmn_close_to_real(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)])
,7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
assert_almost_equal(special.clpmn(m, n, x+1j*eps)[0][m, n],
special.clpmn(m, n, x-1j*eps)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
lp = special.lpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
olderr = np.seterr(all='ignore')
try:
lp = special.lpmv(-1,-1,.001)
finally:
np.seterr(**olderr)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
lqmnf = special.lqmn(0,2,.5)
lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(TestCase):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = special.mathieu_even_coef(2,5)
# Q not defined broken and cannot figure out proper reporting order
def test_mathieu_odd_coef(self):
pass
# same problem as above
class TestFresnelIntegral(TestCase):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(TestCase):
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(TestCase):
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
def test_pbdv(self):
pbv = special.pbdv(1,.2)
derrl = 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
# Test polygamma(0, x) == psi(x)
x = [2, 3, 1.1e14]
assert_almost_equal(special.polygamma(0, x), special.psi(x))
# Test broadcasting
n = [0, 1, 2]
x = [0.5, 1.5, 2.5]
expected = [-1.9635100260214238, 0.93480220054467933,
-0.23620405164172739]
assert_almost_equal(special.polygamma(n, x), expected)
expected = np.row_stack([expected]*2)
assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
expected)
assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
expected)
class TestProCvSeq(TestCase):
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(TestCase):
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(TestCase):
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(TestCase):
def test_riccati_jn(self):
jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
ricjn = special.riccati_jn(1,.2)
assert_array_almost_equal(ricjn,jnrl,8)
def test_riccati_yn(self):
ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
ricyn = special.riccati_yn(1,.2)
assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
def test_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
yield (assert_array_almost_equal, sh(0,0,0,0),
0.5/sqrt(pi))
yield (assert_array_almost_equal, sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
yield (assert_array_almost_equal, sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
yield (assert_array_almost_equal, sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
class TestSpherical(TestCase):
def test_sph_harm(self):
# see test_sph_harm function
pass
def test_sph_in(self):
i1n = special.sph_in(1,.2)
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_inkn(self):
spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
inkn = r_[special.sph_inkn(1,.2)]
assert_array_almost_equal(inkn,spikn,10)
def test_sph_jn(self):
s1 = special.sph_jn(2,.2)
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_jnyn(self):
jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition
jnyn1 = r_[special.sph_jnyn(1,.2)]
assert_array_almost_equal(jnyn1,jnyn,9)
def test_sph_kn(self):
kn = special.sph_kn(2,.2)
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
def test_sph_yn(self):
sy1 = special.sph_yn(2,.2)[0][2]
sy2 = special.sph_yn(0,.2)[0][0]
sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3 # correct derivative value
assert_almost_equal(sy1,-377.52483,5) # previous values in the system
assert_almost_equal(sy2,-4.9003329,5)
sy3 = special.sph_yn(1,.2)[1][1]
assert_almost_equal(sy3,sphpy,4) # compare correct derivative val. (correct =-system val).
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err), (v, z)
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_chi2c_smalldf():
assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
assert_allclose(special.agm(24, 6), 13.4581714817)
assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28)
def test_legacy():
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.simplefilter("ignore", RuntimeWarning)
# Legacy behavior: truncating arguments to integers
assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))
assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))
assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))
assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
finally:
warn_ctx.__exit__()
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionWarning, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
if __name__ == "__main__":
run_module_suite()
|
from django.db import models
# Create your models here.
from django.urls import reverse #Used to generate urls by reversing the URL patterns
class Genre(models.Model):
"""
Model representing a book genre (e.g. Science Fiction, Non Fiction).
"""
name = models.CharField(max_length=200, help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class Language(models.Model):
"""
Model representing a Language (e.g. English, French, Japanese, etc.)
"""
name = models.CharField(max_length=200, help_text="Enter a the book's natural language (e.g. English, French, Japanese etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class Book(models.Model):
"""
Model representing a book (but not a specific copy of a book).
"""
title = models.CharField(max_length=200)
author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
# Foreign Key used because book can only have one author, but authors can have multiple books
# Author as a string rather than object because it hasn't been declared yet in file.
summary = models.TextField(max_length=1000, help_text="Enter a brief description of the book")
isbn = models.CharField('ISBN',max_length=13, help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')
genre = models.ManyToManyField(Genre, help_text="Select a genre for this book")
# ManyToManyField used because Subject can contain many books. Books can cover many subjects.
# Subject declared as an object because it has already been defined.
language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True)
def display_genre(self):
"""
Creates a string for the Genre. This is required to display genre in Admin.
"""
return ', '.join([ genre.name for genre in self.genre.all()[:3] ])
display_genre.short_description = 'Genre'
def get_absolute_url(self):
"""
Returns the url to access a particular book instance.
"""
return reverse('book-detail', args=[str(self.id)])
def __str__(self):
"""
String for representing the Model object.
"""
return self.title
import uuid # Required for unique book instances
from datetime import date
from django.contrib.auth.models import User #Required to assign User as a borrower
class BookInstance(models.Model):
"""
Model representing a specific copy of a book (i.e. that can be borrowed from the library).
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text="Unique ID for this particular book across whole library")
book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
imprint = models.CharField(max_length=200)
due_back = models.DateField(null=True, blank=True)
borrower = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
@property
def is_overdue(self):
if date.today() > self.due_back:
return True
return False
LOAN_STATUS = (
('d', 'Maintenance'),
('o', 'On loan'),
('a', 'Available'),
('r', 'Reserved'),
)
status= models.CharField(max_length=1, choices=LOAN_STATUS, blank=True, default='d', help_text='Book availability')
class Meta:
ordering = ["due_back"]
permissions = (("can_mark_returned", "Set book as returned"),)
def __str__(self):
"""
String for representing the Model object.
"""
return '%s (%s)' % (self.id,self.book.title)
class Author(models.Model):
"""
Model representing an author.
"""
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('died', null=True, blank=True)
def get_absolute_url(self):
"""
Returns the url to access a particular author instance.
"""
return reverse('author-detail', args=[str(self.id)])
def __str__(self):
"""
String for representing the Model object.
"""
return '%s, %s' % (self.last_name, self.first_name)
Add emptiness check for is_overdue()
Without this check, having an empty due_back value on a book instance will cause a TypeError during comparison.
from django.db import models
# Create your models here.
from django.urls import reverse #Used to generate urls by reversing the URL patterns
class Genre(models.Model):
"""
Model representing a book genre (e.g. Science Fiction, Non Fiction).
"""
name = models.CharField(max_length=200, help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class Language(models.Model):
"""
Model representing a Language (e.g. English, French, Japanese, etc.)
"""
name = models.CharField(max_length=200, help_text="Enter a the book's natural language (e.g. English, French, Japanese etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class Book(models.Model):
"""
Model representing a book (but not a specific copy of a book).
"""
title = models.CharField(max_length=200)
author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
# Foreign Key used because book can only have one author, but authors can have multiple books
# Author as a string rather than object because it hasn't been declared yet in file.
summary = models.TextField(max_length=1000, help_text="Enter a brief description of the book")
isbn = models.CharField('ISBN',max_length=13, help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')
genre = models.ManyToManyField(Genre, help_text="Select a genre for this book")
# ManyToManyField used because Subject can contain many books. Books can cover many subjects.
# Subject declared as an object because it has already been defined.
language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True)
def display_genre(self):
"""
Creates a string for the Genre. This is required to display genre in Admin.
"""
return ', '.join([ genre.name for genre in self.genre.all()[:3] ])
display_genre.short_description = 'Genre'
def get_absolute_url(self):
"""
Returns the url to access a particular book instance.
"""
return reverse('book-detail', args=[str(self.id)])
def __str__(self):
"""
String for representing the Model object.
"""
return self.title
import uuid # Required for unique book instances
from datetime import date
from django.contrib.auth.models import User #Required to assign User as a borrower
class BookInstance(models.Model):
"""
Model representing a specific copy of a book (i.e. that can be borrowed from the library).
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text="Unique ID for this particular book across whole library")
book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
imprint = models.CharField(max_length=200)
due_back = models.DateField(null=True, blank=True)
borrower = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
@property
def is_overdue(self):
if self.due_back and date.today() > self.due_back:
return True
return False
LOAN_STATUS = (
('d', 'Maintenance'),
('o', 'On loan'),
('a', 'Available'),
('r', 'Reserved'),
)
status= models.CharField(max_length=1, choices=LOAN_STATUS, blank=True, default='d', help_text='Book availability')
class Meta:
ordering = ["due_back"]
permissions = (("can_mark_returned", "Set book as returned"),)
def __str__(self):
"""
String for representing the Model object.
"""
return '%s (%s)' % (self.id,self.book.title)
class Author(models.Model):
"""
Model representing an author.
"""
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('died', null=True, blank=True)
def get_absolute_url(self):
"""
Returns the url to access a particular author instance.
"""
return reverse('author-detail', args=[str(self.id)])
def __str__(self):
"""
String for representing the Model object.
"""
return '%s, %s' % (self.last_name, self.first_name)
|
# coding: utf-8
""" A script for making figures for our Streams Paper 1 """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import astropy.units as u
import numpy as np
import matplotlib.pyplot as plt
from streams.data.gaia import parallax_error, proper_motion_error, \
rr_lyrae_V_minus_I, rr_lyrae_M_V, \
apparent_magnitude
from streams.potential import LawMajewski2010
from streams.integrate.satellite_particles import SatelliteParticleIntegrator
from streams.data import lm10_particles, lm10_satellite, lm10_time
def normed_objective_plot():
""" Plot our objective function in each of the 4 parameters we vary """
# Can change this to the true adaptive functions so I can compare
timestep2 = lambda *args,**kwargs: -1.
resolution = 1.
# Read in the LM10 data
particles = lm10_particles(N=100, expr="(Pcol>0) & (abs(Lmflag)==1)")
satellite = lm10_satellite()
t1,t2 = lm10_time()
resolution = 3.
variances = dict()
for param in ['q1','qz','v_halo','phi']:
if not variances.has_key(param):
variances[param] = []
stats = np.linspace(true_params[param]*0.9,
true_params[param]*1.1,
10)
for stat in stats:
params = true_params.copy()
params[param] = stat
lm10 = LawMajewski2010(**params)
integrator = SatelliteParticleIntegrator(lm10, satellite, particles)
s_orbit,p_orbits = integrator.run(timestep_func=timestep2,
timestep_args=(lm10, satellite.m.value),
resolution=resolution,
t1=t1, t2=t2)
variances[param].append(generalized_variance_prod(lm10, p_orbits, s_orbit))
add script for making Gaia error plots
# coding: utf-8
""" A script for making figures for our Streams Paper 1 """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import astropy.units as u
import numpy as np
import matplotlib.pyplot as plt
from streams.data.gaia import parallax_error, proper_motion_error, \
rr_lyrae_V_minus_I, rr_lyrae_M_V, \
apparent_magnitude
from streams.potential import LawMajewski2010
from streams.integrate.satellite_particles import SatelliteParticleIntegrator
from streams.data import lm10_particles, lm10_satellite, lm10_time
def gaia_spitzer_errors():
""" Visualize the observational errors from Gaia and Spitzer, along with
dispersion and distance scale of Sgr and Orphan.
"""
# Distance from 1kpc to ~100kpc
D = np.logspace(0., 2., 50)*u.kpc
# Compute the apparent magnitude as a function of distance
m_V = apparent_magnitude(rr_lyrae_M_V, D)
fig,axes = plt.subplots(2, 1, figsize=(12, 12))
# Plot Gaia distance errors
dp = parallax_error(m_V, rr_lyrae_V_minus_I).arcsecond
dD = D.to(u.pc).value**2 * dp * u.pc
axes[0].loglog(D, (dD/D).decompose(), color="k", linewidth=1, alpha=0.5)
# Plot tangential velocity errors
dpm = proper_motion_error(m_V, rr_lyrae_V_minus_I)
dVtan = (dpm*D).to(u.km*u.radian/u.s).value
axes[1].loglog(D, dVtan, color="k", linewidth=1, alpha=0.5)
plt.show()
def normed_objective_plot():
""" Plot our objective function in each of the 4 parameters we vary """
# Can change this to the true adaptive functions so I can compare
timestep2 = lambda *args,**kwargs: -1.
resolution = 1.
# Read in the LM10 data
particles = lm10_particles(N=100, expr="(Pcol>0) & (abs(Lmflag)==1)")
satellite = lm10_satellite()
t1,t2 = lm10_time()
resolution = 3.
variances = dict()
for param in ['q1','qz','v_halo','phi']:
if not variances.has_key(param):
variances[param] = []
stats = np.linspace(true_params[param]*0.9,
true_params[param]*1.1,
10)
for stat in stats:
params = true_params.copy()
params[param] = stat
lm10 = LawMajewski2010(**params)
integrator = SatelliteParticleIntegrator(lm10, satellite, particles)
s_orbit,p_orbits = integrator.run(timestep_func=timestep2,
timestep_args=(lm10, satellite.m.value),
resolution=resolution,
t1=t1, t2=t2)
variances[param].append(generalized_variance_prod(lm10, p_orbits, s_orbit))
if __name__ == '__main__':
gaia_spitzer_errors() |
Update example2 - object distances.py
|
#!/usr/local/bin/python
# coding: utf-8
import sys
import weathermath
import building
import database
from datetime import datetime
def processRoom(room, db):
if room.insideSensor.data_available() and room.outsideSensor.data_available():
insideTemp = room.insideSensor.getTemperature()
insideRelHumid = room.insideSensor.getHumidity()
insideAbsHumid = weathermath.AF(insideRelHumid, insideTemp)
outsideTemp = room.outsideSensor.getTemperature()
outsideRelHumid = room.outsideSensor.getHumidity()
outsideAbsHumid = weathermath.AF(outsideRelHumid, insideTemp)
recommendVentilation = False
if insideAbsHumid - room.minHumidDiff > outsideAbsHumid:
recommendVentilation = True
if insideTemp <= room.minInsideTemp:
recommendVentilation = False
c = db.cursor()
c.execute('insert into weather (date, room, inside_temperature, inside_humidity, ' +
'outside_temperature, outside_humidity, ventilation_recommended) values ' +
'(?, ?, ?, ?, ?, ?, ?)', (datetime.now(), room.name, insideTemp, insideRelHumid, outsideTemp, outsideRelHumid, recommendVentilation))
db.commit()
def main():
db = None
try:
db = database.connect_db()
rooms = building.getRooms()
for room in rooms:
processRoom(room, db)
finally:
if db:
db.close()
if __name__ == '__main__':
main()
fixes outside abs humid computation
#!/usr/local/bin/python
# coding: utf-8
import sys
import weathermath
import building
import database
from datetime import datetime
def processRoom(room, db):
if room.insideSensor.data_available() and room.outsideSensor.data_available():
insideTemp = room.insideSensor.getTemperature()
insideRelHumid = room.insideSensor.getHumidity()
insideAbsHumid = weathermath.AF(insideRelHumid, insideTemp)
outsideTemp = room.outsideSensor.getTemperature()
outsideRelHumid = room.outsideSensor.getHumidity()
outsideAbsHumid = weathermath.AF(outsideRelHumid, outsideTemp)
recommendVentilation = False
if insideAbsHumid - room.minHumidDiff > outsideAbsHumid:
recommendVentilation = True
if insideTemp <= room.minInsideTemp:
recommendVentilation = False
c = db.cursor()
c.execute('insert into weather (date, room, inside_temperature, inside_humidity, ' +
'outside_temperature, outside_humidity, ventilation_recommended) values ' +
'(?, ?, ?, ?, ?, ?, ?)', (datetime.now(), room.name, insideTemp, insideRelHumid, outsideTemp, outsideRelHumid, recommendVentilation))
db.commit()
def main():
db = None
try:
db = database.connect_db()
rooms = building.getRooms()
for room in rooms:
processRoom(room, db)
finally:
if db:
db.close()
if __name__ == '__main__':
main()
|
"""Tests of imports of sciunit submodules and other dependencies"""
from .base import *
class ImportTestCase(unittest.TestCase):
"""Unit tests for imports"""
def test_quantities(self):
import quantities as pq
pq.Quantity([10,20,30], pq.pA)
def test_import_everything(self):
import sciunit
from sciunit.utils import import_all_modules
# Recursively import all submodules
import_all_modules(sciunit)
if __name__ == '__main__':
unittest.main()
Added import
"""Tests of imports of sciunit submodules and other dependencies"""
import unittest
from .base import *
class ImportTestCase(unittest.TestCase):
"""Unit tests for imports"""
def test_quantities(self):
import quantities as pq
pq.Quantity([10,20,30], pq.pA)
def test_import_everything(self):
import sciunit
from sciunit.utils import import_all_modules
# Recursively import all submodules
import_all_modules(sciunit)
if __name__ == '__main__':
unittest.main() |
import os
from flask import flash, get_flashed_messages, redirect, render_template, \
request, url_for, abort
from flask import Blueprint, Markup
from flask.ext.login import current_user
from werkzeug import secure_filename
from viaduct import application, db
from viaduct.helpers import flash_form_errors
from forms import CreateForm
from models import Activity
#from dateutil.parser import parse
import datetime
blueprint = Blueprint('activity', __name__)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in set(['png', 'jpg', 'gif', 'jpeg'])
# Overview of activities
@blueprint.route('/activities/', methods=['GET', 'POST'])
@blueprint.route('/activities/<int:page>/', methods=['GET', 'POST'])
def view(page=1):
activities = Activity.query \
.order_by(Activity.start_time.desc()) \
.paginate(page, 15, False)
return render_template('activity/view.htm', activities=activities)
@blueprint.route('activities/activity/<int:activity_id>', methods=['GET', 'POST'])
def get_activity(activity_id = 0):
activity = Activity.query.filter(Activity.id == activity_id).first()
return render_template('activities/activity/view_single.htm', activity=activity)
@blueprint.route('activities/activity/create/', methods=['GET', 'POST'])
@blueprint.route('activities/activity/edit/<int:activity_id>', methods=['GET', 'POST'])
def create(activity_id=None):
if not current_user or current_user.email != 'administrator@svia.nl':
return abort(403)
if activity_id:
activity = Activity.query.filter(Activity.id == activity_id).first()
else:
activity = None
form = CreateForm(request.form, activity)
if request.method == 'POST':
valid_form = True
owner_id = current_user.id
name = form.name.data
description = request.form['description'].strip()
start_date = request.form['start_date'].strip()
start_time = request.form['start_time'].strip()
start = datetime.datetime.strptime(start_date + start_time, '%Y-%m-%d%H:%M')
end_date = request.form['end_date'].strip()
end_time = request.form['end_time'].strip()
end = datetime.datetime.strptime(end_date + end_time, '%Y-%m-%d%H:%M')
location = request.form['location'].strip()
privacy = "public"
price = request.form['price'].strip()
file = request.files['picture']
if file and allowed_file(file.filename):
picture = secure_filename(file.filename)
file.save(os.path.join('viaduct/static/activity_pictures', picture))
else:
picture = "yolo.png"
venue = 1 # Facebook ID location, not used yet
if not name:
flash('No activity name has been specified.', 'error')
valid_form = False
if not description:
flash('The activity requires a description.', 'error')
valid_form = False
if valid_form:
activity = Activity(
owner_id,
name,
description,
start,
end,
location,
privacy,
price,
picture,
venue
)
db.session.add(activity)
db.session.commit()
flash('You\'ve created an activity successfully.')
return redirect(url_for('activity.view'))
else:
flash_form_errors(form)
return render_template('activity/create.htm', form=form)
hello
import os
from flask import flash, get_flashed_messages, redirect, render_template, \
request, url_for, abort
from flask import Blueprint, Markup
from flask.ext.login import current_user
from werkzeug import secure_filename
from viaduct import application, db
from viaduct.helpers import flash_form_errors
from forms import CreateForm
from models import Activity
#from dateutil.parser import parse
import datetime
blueprint = Blueprint('activity', __name__)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in set(['png', 'jpg', 'gif', 'jpeg'])
# Overview of activities
@blueprint.route('/activities/', methods=['GET', 'POST'])
@blueprint.route('/activities/<int:page>/', methods=['GET', 'POST'])
def view(page=1):
activities = Activity.query \
.order_by(Activity.start_time.desc()) \
.paginate(page, 15, False)
return render_template('activity/view.htm', activities=activities)
@blueprint.route('/activities/activity/<int:activity_id>', methods=['GET', 'POST'])
def get_activity(activity_id = 0):
activity = Activity.query.filter(Activity.id == activity_id).first()
return render_template('activities/activity/view_single.htm', activity=activity)
@blueprint.route('/activities/activity/create/', methods=['GET', 'POST'])
@blueprint.route('/activities/activity/edit/<int:activity_id>', methods=['GET', 'POST'])
def create(activity_id=None):
if not current_user or current_user.email != 'administrator@svia.nl':
return abort(403)
if activity_id:
activity = Activity.query.filter(Activity.id == activity_id).first()
else:
activity = None
form = CreateForm(request.form, activity)
if request.method == 'POST':
valid_form = True
owner_id = current_user.id
name = form.name.data
description = request.form['description'].strip()
start_date = request.form['start_date'].strip()
start_time = request.form['start_time'].strip()
start = datetime.datetime.strptime(start_date + start_time, '%Y-%m-%d%H:%M')
end_date = request.form['end_date'].strip()
end_time = request.form['end_time'].strip()
end = datetime.datetime.strptime(end_date + end_time, '%Y-%m-%d%H:%M')
location = request.form['location'].strip()
privacy = "public"
price = request.form['price'].strip()
file = request.files['picture']
if file and allowed_file(file.filename):
picture = secure_filename(file.filename)
file.save(os.path.join('viaduct/static/activity_pictures', picture))
else:
picture = "yolo.png"
venue = 1 # Facebook ID location, not used yet
if not name:
flash('No activity name has been specified.', 'error')
valid_form = False
if not description:
flash('The activity requires a description.', 'error')
valid_form = False
if valid_form:
activity = Activity(
owner_id,
name,
description,
start,
end,
location,
privacy,
price,
picture,
venue
)
db.session.add(activity)
db.session.commit()
flash('You\'ve created an activity successfully.')
return redirect(url_for('activity.view'))
else:
flash_form_errors(form)
return render_template('activity/create.htm', form=form)
|
""" Sample controller """
#from kervi.controller import Controller
#from kervi.values import DynamicBoolean, DynamicNumber
#from kervi.hal import GPIO
from kervi.sensor import Sensor
from kervi.steering import MotorSteering
from kervi_devices.motors.adafruit_i2c_motor_hat import AdafruitMotorHAT
from kervi_devices.sensors.LSM9DS0 import LSM9DS0OrientationDeviceDriver
from kervi.controller import Controller
from kervi.values import DynamicNumber, DynamicBoolean
import time
class PIDController(Controller):
def __init__(self, controller_id, name):
Controller.__init__(self, controller_id, name)
self.kp = self.inputs.add("kp", "Kp", DynamicNumber)
self.kd = self.inputs.add("kd", "Kd", DynamicNumber)
self.ki = self.inputs.add("ki", "Ki", DynamicNumber)
self.active = self.inputs.add("active", "Active", DynamicBoolean)
self.windup_guard = self.inputs.add("windup_guard", "Windup guard", DynamicNumber)
self.base_value = self.inputs.add("base_value", "Base value", DynamicNumber)
self.value = self.inputs.add("value", "Value", DynamicNumber)
self .result = self.outputs.add("pid_result", "PID result", DynamicNumber)
self.sample_time = 0.00
self.current_time = time.time()
self.last_time = self.current_time
self.p_term = 0.0
self.i_term = 0.0
self.d_term = 0.0
self.last_error = 0.0
# Windup Guard
self.int_error = 0.0
self.windup_guard.value = 20.0
def dynamic_value_changed(self, changed_input):
if changed_input == self.value:
if self.active.value:
error = self.value.value - self.base_value.value
self.current_time = time.time()
delta_time = self.current_time - self.last_time
delta_error = error - self.last_error
if delta_time >= self.sample_time:
self.p_term = self.kp.value * error
self.i_term += error * delta_time
if self.i_term < -self.windup_guard.value:
self.i_term = -self.windup_guard.value
elif self.i_term > self.windup_guard.value:
self.i_term = self.windup_guard.value
self.d_term = 0.0
if delta_time > 0:
self.d_term = delta_error / delta_time
self.last_time = self.current_time
self.last_error = error
self.value.value = self.p_term + (self.ki.value * self.i_term) + (self.kd.value * self.d_term)
steering = MotorSteering()
steering.speed.link_to_dashboard("app", "steering")
steering.direction.link_to_dashboard("app", "steering")
steering.all_off.link_to_dashboard("app", "steering")
steering.speed.link_to_dashboard("app", "left_pad_y")
steering.direction.link_to_dashboard("app", "left_pad_x")
pid_controller = PIDController("balance_pid", "Balance pid")
pid_controller.kp.link_to_dashboard("app", "balance_pid")
pid_controller.ki.link_to_dashboard("app", "balance_pid")
pid_controller.kd.link_to_dashboard("app", "balance_pid")
pid_controller.windup_guard.link_to_dashboard("app", "balance_pid")
pid_controller.base_value.link_to_dashboard("app", "balance_pid")
pid_controller.active.link_to_dashboard("app", "balance_pid")
steering.speed.link_to(pid_controller.value)
motor_board = AdafruitMotorHAT()
motor_board.dc_motors[2].speed.link_to(steering.left_speed)
motor_board.dc_motors[3].speed.link_to(steering.right_speed)
orientation_sensor = Sensor("orientation", "orientation", LSM9DS0OrientationDeviceDriver())
orientation_sensor.store_to_db = False
orientation_sensor.link_to_dashboard("app", "sensors", type="value", size=2)
fix bug
""" Sample controller """
#from kervi.controller import Controller
#from kervi.values import DynamicBoolean, DynamicNumber
#from kervi.hal import GPIO
from kervi.sensor import Sensor
from kervi.steering import MotorSteering
from kervi_devices.motors.adafruit_i2c_motor_hat import AdafruitMotorHAT
from kervi_devices.sensors.LSM9DS0 import LSM9DS0OrientationDeviceDriver
from kervi.controller import Controller
from kervi.values import DynamicNumber, DynamicBoolean
import time
class PIDController(Controller):
def __init__(self, controller_id, name):
Controller.__init__(self, controller_id, name)
self.kp = self.inputs.add("kp", "Kp", DynamicNumber)
self.kd = self.inputs.add("kd", "Kd", DynamicNumber)
self.ki = self.inputs.add("ki", "Ki", DynamicNumber)
self.active = self.inputs.add("active", "Active", DynamicBoolean)
self.windup_guard = self.inputs.add("windup_guard", "Windup guard", DynamicNumber)
self.base_value = self.inputs.add("base_value", "Base value", DynamicNumber)
self.value = self.inputs.add("value", "Value", DynamicNumber)
self.result = self.outputs.add("pid_result", "PID result", DynamicNumber)
self.sample_time = 0.00
self.current_time = time.time()
self.last_time = self.current_time
self.p_term = 0.0
self.i_term = 0.0
self.d_term = 0.0
self.last_error = 0.0
# Windup Guard
self.int_error = 0.0
self.windup_guard.value = 20.0
def dynamic_value_changed(self, changed_input):
if changed_input == self.value:
if self.active.value:
error = self.value.value - self.base_value.value
self.current_time = time.time()
delta_time = self.current_time - self.last_time
delta_error = error - self.last_error
if delta_time >= self.sample_time:
self.p_term = self.kp.value * error
self.i_term += error * delta_time
if self.i_term < -self.windup_guard.value:
self.i_term = -self.windup_guard.value
elif self.i_term > self.windup_guard.value:
self.i_term = self.windup_guard.value
self.d_term = 0.0
if delta_time > 0:
self.d_term = delta_error / delta_time
self.last_time = self.current_time
self.last_error = error
self.value.value = self.p_term + (self.ki.value * self.i_term) + (self.kd.value * self.d_term)
steering = MotorSteering()
steering.speed.link_to_dashboard("app", "steering")
steering.direction.link_to_dashboard("app", "steering")
steering.all_off.link_to_dashboard("app", "steering")
steering.speed.link_to_dashboard("app", "left_pad_y")
steering.direction.link_to_dashboard("app", "left_pad_x")
pid_controller = PIDController("balance_pid", "Balance pid")
pid_controller.kp.link_to_dashboard("app", "balance_pid")
pid_controller.ki.link_to_dashboard("app", "balance_pid")
pid_controller.kd.link_to_dashboard("app", "balance_pid")
pid_controller.windup_guard.link_to_dashboard("app", "balance_pid")
pid_controller.base_value.link_to_dashboard("app", "balance_pid")
pid_controller.active.link_to_dashboard("app", "balance_pid")
steering.speed.link_to(pid_controller.result)
motor_board = AdafruitMotorHAT()
motor_board.dc_motors[2].speed.link_to(steering.left_speed)
motor_board.dc_motors[3].speed.link_to(steering.right_speed)
orientation_sensor = Sensor("orientation", "orientation", LSM9DS0OrientationDeviceDriver())
orientation_sensor.store_to_db = False
orientation_sensor.link_to_dashboard("app", "sensors", type="value", size=2)
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import resolve, reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from maintenance_mode import core
from maintenance_mode import settings
class MaintenanceModeMiddleware(object):
def process_request(self, request):
if settings.MAINTENANCE_MODE or core.get_maintenance_mode():
try:
url_off = reverse('maintenance_mode_off')
resolve(url_off)
if url_off == request.path_info:
return None
except NoReverseMatch:
#maintenance_mode.urls not added
pass
if hasattr(request, 'user'):
if settings.MAINTENANCE_MODE_IGNORE_STAFF and request.user.is_staff:
return None
if settings.MAINTENANCE_MODE_IGNORE_SUPERUSER and request.user.is_superuser:
return None
for ip_address_re in settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES_RE:
if ip_address_re.match(request.META['REMOTE_ADDR']):
return None
for url_re in settings.MAINTENANCE_MODE_IGNORE_URLS_RE:
if url_re.match(request.path_info):
return None
if settings.MAINTENANCE_MODE_REDIRECT_URL:
return HttpResponseRedirect(settings.MAINTENANCE_MODE_REDIRECT_URL)
else:
return render_to_response(settings.MAINTENANCE_MODE_TEMPLATE, self.get_request_context(request), context_instance=RequestContext(request), content_type='text/html', status=503)
else:
return None
def get_request_context(self, request):
if settings.MAINTENANCE_MODE_TEMPLATE_CONTEXT:
from importlib import import_module
func_path = settings.MAINTENANCE_MODE_TEMPLATE_CONTEXT
mod_name, func_name = func_path.rsplit('.',1)
mod = import_module(mod_name)
func = getattr(mod, func_name)
return func(request = request)
else:
return {}
Fix missing import
# -*- coding: utf-8 -*-
from django.core.urlresolvers import NoReverseMatch, resolve, reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from maintenance_mode import core
from maintenance_mode import settings
class MaintenanceModeMiddleware(object):
def process_request(self, request):
if settings.MAINTENANCE_MODE or core.get_maintenance_mode():
try:
url_off = reverse('maintenance_mode_off')
resolve(url_off)
if url_off == request.path_info:
return None
except NoReverseMatch:
#maintenance_mode.urls not added
pass
if hasattr(request, 'user'):
if settings.MAINTENANCE_MODE_IGNORE_STAFF and request.user.is_staff:
return None
if settings.MAINTENANCE_MODE_IGNORE_SUPERUSER and request.user.is_superuser:
return None
for ip_address_re in settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES_RE:
if ip_address_re.match(request.META['REMOTE_ADDR']):
return None
for url_re in settings.MAINTENANCE_MODE_IGNORE_URLS_RE:
if url_re.match(request.path_info):
return None
if settings.MAINTENANCE_MODE_REDIRECT_URL:
return HttpResponseRedirect(settings.MAINTENANCE_MODE_REDIRECT_URL)
else:
return render_to_response(settings.MAINTENANCE_MODE_TEMPLATE, self.get_request_context(request), context_instance=RequestContext(request), content_type='text/html', status=503)
else:
return None
def get_request_context(self, request):
if settings.MAINTENANCE_MODE_TEMPLATE_CONTEXT:
from importlib import import_module
func_path = settings.MAINTENANCE_MODE_TEMPLATE_CONTEXT
mod_name, func_name = func_path.rsplit('.',1)
mod = import_module(mod_name)
func = getattr(mod, func_name)
return func(request = request)
else:
return {}
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for training the prediction model."""
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from prediction_input import build_tfrecord_input
from prediction_model import construct_model
# How often to record tensorboard summaries.
SUMMARY_INTERVAL = 40
# How often to run a batch through the validation model.
VAL_INTERVAL = 200
# How often to save a model checkpoint
SAVE_INTERVAL = 2000
# tf record data location:
DATA_DIR = 'push/push_train'
# local output directory
OUT_DIR = '/tmp/data'
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.')
flags.DEFINE_string('output_dir', OUT_DIR, 'directory for model checkpoints.')
flags.DEFINE_string('event_log_dir', OUT_DIR, 'directory for writing summary.')
flags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.')
flags.DEFINE_string('pretrained_model', '',
'filepath of a pretrained model to initialize from.')
flags.DEFINE_integer('sequence_length', 10,
'sequence length, including context frames.')
flags.DEFINE_integer('context_frames', 2, '# of frames before predictions.')
flags.DEFINE_integer('use_state', 1,
'Whether or not to give the state+action to the model')
flags.DEFINE_string('model', 'CDNA',
'model architecture to use - CDNA, DNA, or STP')
flags.DEFINE_integer('num_masks', 10,
'number of masks, usually 1 for DNA, 10 for CDNA, STN.')
flags.DEFINE_float('schedsamp_k', 900.0,
'The k hyperparameter for scheduled sampling,'
'-1 for no scheduled sampling.')
flags.DEFINE_float('train_val_split', 0.95,
'The percentage of files to use for the training set,'
' vs. the validation set.')
flags.DEFINE_integer('batch_size', 32, 'batch size for training')
flags.DEFINE_float('learning_rate', 0.001,
'the base learning rate of the generator')
## Helper functions
def peak_signal_to_noise_ratio(true, pred):
"""Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
"""
return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
def mean_squared_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred))
class Model(object):
def __init__(self,
images=None,
actions=None,
states=None,
sequence_length=None,
reuse_scope=None,
prefix=None):
if sequence_length is None:
sequence_length = FLAGS.sequence_length
if prefix is None:
prefix = tf.placeholder(tf.string, [])
self.prefix = prefix
self.iter_num = tf.placeholder(tf.float32, [])
summaries = []
# Split into timesteps.
actions = tf.split(axis=1, num_or_size_splits=int(actions.get_shape()[1]), value=actions)
actions = [tf.squeeze(act) for act in actions]
states = tf.split(axis=1, num_or_size_splits=int(states.get_shape()[1]), value=states)
states = [tf.squeeze(st) for st in states]
images = tf.split(axis=1, num_or_size_splits=int(images.get_shape()[1]), value=images)
images = [tf.squeeze(img) for img in images]
if reuse_scope is None:
gen_images, gen_states = construct_model(
images,
actions,
states,
iter_num=self.iter_num,
k=FLAGS.schedsamp_k,
use_state=FLAGS.use_state,
num_masks=FLAGS.num_masks,
cdna=FLAGS.model == 'CDNA',
dna=FLAGS.model == 'DNA',
stp=FLAGS.model == 'STP',
context_frames=FLAGS.context_frames)
else: # If it's a validation or test model.
with tf.variable_scope(reuse_scope, reuse=True):
gen_images, gen_states = construct_model(
images,
actions,
states,
iter_num=self.iter_num,
k=FLAGS.schedsamp_k,
use_state=FLAGS.use_state,
num_masks=FLAGS.num_masks,
cdna=FLAGS.model == 'CDNA',
dna=FLAGS.model == 'DNA',
stp=FLAGS.model == 'STP',
context_frames=FLAGS.context_frames)
# L2 loss, PSNR for eval.
loss, psnr_all = 0.0, 0.0
for i, x, gx in zip(
range(len(gen_images)), images[FLAGS.context_frames:],
gen_images[FLAGS.context_frames - 1:]):
recon_cost = mean_squared_error(x, gx)
psnr_i = peak_signal_to_noise_ratio(x, gx)
psnr_all += psnr_i
summaries.append(
tf.summary.scalar(prefix + '_recon_cost' + str(i), recon_cost))
summaries.append(tf.summary.scalar(prefix + '_psnr' + str(i), psnr_i))
loss += recon_cost
for i, state, gen_state in zip(
range(len(gen_states)), states[FLAGS.context_frames:],
gen_states[FLAGS.context_frames - 1:]):
state_cost = mean_squared_error(state, gen_state) * 1e-4
summaries.append(
tf.summary.scalar(prefix + '_state_cost' + str(i), state_cost))
loss += state_cost
summaries.append(tf.summary.scalar(prefix + '_psnr_all', psnr_all))
self.psnr_all = psnr_all
self.loss = loss = loss / np.float32(len(images) - FLAGS.context_frames)
summaries.append(tf.summary.scalar(prefix + '_loss', loss))
self.lr = tf.placeholder_with_default(FLAGS.learning_rate, ())
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
self.summ_op = tf.summary.merge(summaries)
def main(unused_argv):
print('Constructing models and inputs.')
with tf.variable_scope('model', reuse=None) as training_scope:
images, actions, states = build_tfrecord_input(training=True)
model = Model(images, actions, states, FLAGS.sequence_length,
prefix='train')
with tf.variable_scope('val_model', reuse=None):
val_images, val_actions, val_states = build_tfrecord_input(training=False)
val_model = Model(val_images, val_actions, val_states,
FLAGS.sequence_length, training_scope, prefix='val')
print('Constructing saver.')
# Make saver.
saver = tf.train.Saver(
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=0)
# Make training session.
sess = tf.InteractiveSession()
summary_writer = tf.summary.FileWriter(
FLAGS.event_log_dir, graph=sess.graph, flush_secs=10)
if FLAGS.pretrained_model:
saver.restore(sess, FLAGS.pretrained_model)
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
tf.logging.info('iteration number, cost')
# Run training.
for itr in range(FLAGS.num_iterations):
# Generate new batch of data.
feed_dict = {model.iter_num: np.float32(itr),
model.lr: FLAGS.learning_rate}
cost, _, summary_str = sess.run([model.loss, model.train_op, model.summ_op],
feed_dict)
# Print info: iteration #, cost.
tf.logging.info(str(itr) + ' ' + str(cost))
if (itr) % VAL_INTERVAL == 2:
# Run through validation set.
feed_dict = {val_model.lr: 0.0,
val_model.iter_num: np.float32(itr)}
_, val_summary_str = sess.run([val_model.train_op, val_model.summ_op],
feed_dict)
summary_writer.add_summary(val_summary_str, itr)
if (itr) % SAVE_INTERVAL == 2:
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model' + str(itr))
if (itr) % SUMMARY_INTERVAL:
summary_writer.add_summary(summary_str, itr)
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model')
tf.logging.info('Training complete')
tf.logging.flush()
if __name__ == '__main__':
app.run()
Fixing the initialization/loading bug.
The code currently loads the checkpoint and then initializes the variables resulting to random weights.
Swapping the order fixes the loading checkpoint issue.
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for training the prediction model."""
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from prediction_input import build_tfrecord_input
from prediction_model import construct_model
# How often to record tensorboard summaries.
SUMMARY_INTERVAL = 40
# How often to run a batch through the validation model.
VAL_INTERVAL = 200
# How often to save a model checkpoint
SAVE_INTERVAL = 2000
# tf record data location:
DATA_DIR = 'push/push_train'
# local output directory
OUT_DIR = '/tmp/data'
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.')
flags.DEFINE_string('output_dir', OUT_DIR, 'directory for model checkpoints.')
flags.DEFINE_string('event_log_dir', OUT_DIR, 'directory for writing summary.')
flags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.')
flags.DEFINE_string('pretrained_model', '',
'filepath of a pretrained model to initialize from.')
flags.DEFINE_integer('sequence_length', 10,
'sequence length, including context frames.')
flags.DEFINE_integer('context_frames', 2, '# of frames before predictions.')
flags.DEFINE_integer('use_state', 1,
'Whether or not to give the state+action to the model')
flags.DEFINE_string('model', 'CDNA',
'model architecture to use - CDNA, DNA, or STP')
flags.DEFINE_integer('num_masks', 10,
'number of masks, usually 1 for DNA, 10 for CDNA, STN.')
flags.DEFINE_float('schedsamp_k', 900.0,
'The k hyperparameter for scheduled sampling,'
'-1 for no scheduled sampling.')
flags.DEFINE_float('train_val_split', 0.95,
'The percentage of files to use for the training set,'
' vs. the validation set.')
flags.DEFINE_integer('batch_size', 32, 'batch size for training')
flags.DEFINE_float('learning_rate', 0.001,
'the base learning rate of the generator')
## Helper functions
def peak_signal_to_noise_ratio(true, pred):
"""Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
"""
return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
def mean_squared_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred))
class Model(object):
def __init__(self,
images=None,
actions=None,
states=None,
sequence_length=None,
reuse_scope=None,
prefix=None):
if sequence_length is None:
sequence_length = FLAGS.sequence_length
if prefix is None:
prefix = tf.placeholder(tf.string, [])
self.prefix = prefix
self.iter_num = tf.placeholder(tf.float32, [])
summaries = []
# Split into timesteps.
actions = tf.split(axis=1, num_or_size_splits=int(actions.get_shape()[1]), value=actions)
actions = [tf.squeeze(act) for act in actions]
states = tf.split(axis=1, num_or_size_splits=int(states.get_shape()[1]), value=states)
states = [tf.squeeze(st) for st in states]
images = tf.split(axis=1, num_or_size_splits=int(images.get_shape()[1]), value=images)
images = [tf.squeeze(img) for img in images]
if reuse_scope is None:
gen_images, gen_states = construct_model(
images,
actions,
states,
iter_num=self.iter_num,
k=FLAGS.schedsamp_k,
use_state=FLAGS.use_state,
num_masks=FLAGS.num_masks,
cdna=FLAGS.model == 'CDNA',
dna=FLAGS.model == 'DNA',
stp=FLAGS.model == 'STP',
context_frames=FLAGS.context_frames)
else: # If it's a validation or test model.
with tf.variable_scope(reuse_scope, reuse=True):
gen_images, gen_states = construct_model(
images,
actions,
states,
iter_num=self.iter_num,
k=FLAGS.schedsamp_k,
use_state=FLAGS.use_state,
num_masks=FLAGS.num_masks,
cdna=FLAGS.model == 'CDNA',
dna=FLAGS.model == 'DNA',
stp=FLAGS.model == 'STP',
context_frames=FLAGS.context_frames)
# L2 loss, PSNR for eval.
loss, psnr_all = 0.0, 0.0
for i, x, gx in zip(
range(len(gen_images)), images[FLAGS.context_frames:],
gen_images[FLAGS.context_frames - 1:]):
recon_cost = mean_squared_error(x, gx)
psnr_i = peak_signal_to_noise_ratio(x, gx)
psnr_all += psnr_i
summaries.append(
tf.summary.scalar(prefix + '_recon_cost' + str(i), recon_cost))
summaries.append(tf.summary.scalar(prefix + '_psnr' + str(i), psnr_i))
loss += recon_cost
for i, state, gen_state in zip(
range(len(gen_states)), states[FLAGS.context_frames:],
gen_states[FLAGS.context_frames - 1:]):
state_cost = mean_squared_error(state, gen_state) * 1e-4
summaries.append(
tf.summary.scalar(prefix + '_state_cost' + str(i), state_cost))
loss += state_cost
summaries.append(tf.summary.scalar(prefix + '_psnr_all', psnr_all))
self.psnr_all = psnr_all
self.loss = loss = loss / np.float32(len(images) - FLAGS.context_frames)
summaries.append(tf.summary.scalar(prefix + '_loss', loss))
self.lr = tf.placeholder_with_default(FLAGS.learning_rate, ())
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
self.summ_op = tf.summary.merge(summaries)
def main(unused_argv):
print('Constructing models and inputs.')
with tf.variable_scope('model', reuse=None) as training_scope:
images, actions, states = build_tfrecord_input(training=True)
model = Model(images, actions, states, FLAGS.sequence_length,
prefix='train')
with tf.variable_scope('val_model', reuse=None):
val_images, val_actions, val_states = build_tfrecord_input(training=False)
val_model = Model(val_images, val_actions, val_states,
FLAGS.sequence_length, training_scope, prefix='val')
print('Constructing saver.')
# Make saver.
saver = tf.train.Saver(
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=0)
# Make training session.
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(
FLAGS.event_log_dir, graph=sess.graph, flush_secs=10)
if FLAGS.pretrained_model:
saver.restore(sess, FLAGS.pretrained_model)
tf.train.start_queue_runners(sess)
tf.logging.info('iteration number, cost')
# Run training.
for itr in range(FLAGS.num_iterations):
# Generate new batch of data.
feed_dict = {model.iter_num: np.float32(itr),
model.lr: FLAGS.learning_rate}
cost, _, summary_str = sess.run([model.loss, model.train_op, model.summ_op],
feed_dict)
# Print info: iteration #, cost.
tf.logging.info(str(itr) + ' ' + str(cost))
if (itr) % VAL_INTERVAL == 2:
# Run through validation set.
feed_dict = {val_model.lr: 0.0,
val_model.iter_num: np.float32(itr)}
_, val_summary_str = sess.run([val_model.train_op, val_model.summ_op],
feed_dict)
summary_writer.add_summary(val_summary_str, itr)
if (itr) % SAVE_INTERVAL == 2:
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model' + str(itr))
if (itr) % SUMMARY_INTERVAL:
summary_writer.add_summary(summary_str, itr)
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model')
tf.logging.info('Training complete')
tf.logging.flush()
if __name__ == '__main__':
app.run()
|
#!/usr/bin/env python
'''
File: gh_status.py
Author: Carno <carnophage at dobramama dot pl>
Description: Simple command line snippet to check GitHub status page
'''
from __future__ import print_function
import sys
try:
import requests
except ImportError:
print('Missing module: requests.')
sys.exit(3)
GH_STATUS_API = 'https://status.github.com/api.json'
class GhStatus(object):
"""Methods for getting current GitHub status"""
def __init__(self):
try:
api_request = requests.get(GH_STATUS_API)
api_request.raise_for_status()
except requests.exceptions.RequestException:
print('Failed to get github:status api')
sys.exit(2)
self.gh_api = api_request.json
if not self.gh_api:
print('Failed to decoded GitHub api json')
sys.exit(2)
self.gh_status = ''
self.gh_last_msg = ''
self.gh_last_msg_time = ''
def get_status(self):
"""Get current github status"""
try:
status_request = requests.get(self.gh_api['status_url'])
except requests.exceptions.RequestException:
print('Failed to get status_url json')
sys.exit(2)
if not status_request.json:
print('Failed to decode status json')
sys.exit(2)
self.gh_status = status_request.json['status']
return self.gh_status
def get_last_msg(self):
"""Get last message from GitHub status page"""
try:
last_msg_request = requests.get(self.gh_api['last_message_url'])
except requests.exceptions.RequestException:
print('Failed to get last_message_url json')
sys.exit(2)
last_msg = last_msg_request.json
if not last_msg:
print('Failed to decode last message json')
sys.exit(2)
self.gh_status = last_msg['status']
self.gh_last_msg = last_msg['body']
self.gh_last_msg_time = last_msg['created_on']
return (self.gh_status, self.gh_last_msg, self.gh_last_msg_time)
def _main():
"""Dummy main function"""
gh_status = GhStatus()
current_status = gh_status.get_status()
if current_status != 'good':
# TODO: display the time in a more sane format
print("GitHub has {0} issues 8^(\nLast update@{2}: {1}".format(gh_status.get_last_msg()))
sys.exit(1)
else:
print('GitHub is good 8^)')
sys.exit(0)
if __name__ == '__main__':
_main()
Adjust to requests version 1.0.4
#!/usr/bin/env python
'''
File: gh_status.py
Author: Carno <carnophage at dobramama dot pl>
Description: Simple command line snippet to check GitHub status page
'''
from __future__ import print_function
import sys
try:
import requests
except ImportError:
print('Missing module: requests.')
sys.exit(3)
GH_STATUS_API = 'https://status.github.com/api.json'
class GhStatus(object):
"""Methods for getting current GitHub status"""
def __init__(self):
try:
api_request = requests.get(GH_STATUS_API)
api_request.raise_for_status()
except requests.exceptions.RequestException:
print('Failed to get github:status api')
sys.exit(2)
self.gh_api = api_request.json()
if not self.gh_api:
print('Failed to decoded GitHub api json')
sys.exit(2)
self.gh_status = ''
self.gh_last_msg = ''
self.gh_last_msg_time = ''
def get_status(self):
"""Get current github status"""
try:
status_request = requests.get(self.gh_api['status_url'])
except requests.exceptions.RequestException:
print('Failed to get status_url json')
sys.exit(2)
if not status_request.json():
print('Failed to decode status json')
sys.exit(2)
self.gh_status = status_request.json()['status']
return self.gh_status
def get_last_msg(self):
"""Get last message from GitHub status page"""
try:
last_msg_request = requests.get(self.gh_api['last_message_url'])
except requests.exceptions.RequestException:
print('Failed to get last_message_url json')
sys.exit(2)
last_msg = last_msg_request.json()
if not last_msg:
print('Failed to decode last message json')
sys.exit(2)
self.gh_status = last_msg['status']
self.gh_last_msg = last_msg['body']
self.gh_last_msg_time = last_msg['created_on']
return (self.gh_status, self.gh_last_msg, self.gh_last_msg_time)
def _main():
"""Dummy main function"""
gh_status = GhStatus()
current_status = gh_status.get_status()
if current_status != 'good':
# TODO: display the time in a more sane format
print("GitHub has {0} issues 8^(\nLast update@{2}: {1}".format(gh_status.get_last_msg()))
sys.exit(1)
else:
print('GitHub is good 8^)')
sys.exit(0)
if __name__ == '__main__':
_main()
|
# coding: utf-8
from django.contrib import admin
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from .models import UserAccount
@admin.register(UserAccount)
class UserAccountAdmin(admin.ModelAdmin):
def get_user_first_name(self, obj):
return obj.user.first_name
get_user_first_name.short_description = _(u'first name')
get_user_first_name.admin_order_field = 'user__first_name'
def get_user_last_name(self, obj):
return obj.user.last_name
get_user_last_name.short_description = _(u'first name')
get_user_last_name.admin_order_field = 'user__last_name'
def get_user_email(self, obj):
return obj.user.email
get_user_email.short_description = _(u'email')
get_user_email.admin_order_field = 'user__email'
list_display = (
'user',
'get_user_email',
'get_user_first_name',
'get_user_last_name'
)
raw_id_fields = ('user',)
search_fields = (
'user__username',
'user__email',
'user__last_name',
'user__first_name'
)
list_filter = (
'user__is_active',
'user__is_staff',
)
from django.contrib.sessions.models import Session
from django.contrib.auth.models import User
@admin.register(Session)
class SessionAdmin(admin.ModelAdmin):
def _session_data(self, obj):
_session_data = u''
decoded = obj.get_decoded()
for key in sorted(decoded):
_session_data += u'<b>' + key + u'</b>: ' + decoded.get(key)
if '_auth_user_id' == key:
try:
user = User.objects.get(id=decoded.get(key))
_session_data += u' (' + user.username + ')'
except Exception as e:
pass
_session_data += u'<br/>'
return format_html(_session_data)
_session_data.allow_tags = True
list_display = ['session_key', '_session_data', 'expire_date']
readonly_fields = ['_session_data']
exclude = ['session_data']
date_hierarchy = 'expire_date'
ordering = ['expire_date']
Enhance user account admin
Current user account admin view displays base information.
Registration admin view is limited to user and activation status,
without filters.
This change enhances user account admin view, to display activation
status and date of join plus last login.
Additionally it enables to resend activation email.
# coding: utf-8
import logging
from django.contrib import admin
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from registration.admin import RegistrationAdmin
from registration.models import RegistrationProfile
from .models import UserAccount
logger = logging.getLogger(__name__)
@admin.register(UserAccount)
class UserAccountAdmin(admin.ModelAdmin):
def get_user_first_name(self, obj):
return obj.user.first_name
get_user_first_name.short_description = _(u'first name')
get_user_first_name.admin_order_field = 'user__first_name'
def get_user_last_name(self, obj):
return obj.user.last_name
get_user_last_name.short_description = _(u'first name')
get_user_last_name.admin_order_field = 'user__last_name'
def get_user_email(self, obj):
return obj.user.email
get_user_email.short_description = _(u'email')
get_user_email.admin_order_field = 'user__email'
list_display = (
'user',
'get_user_email',
'get_user_first_name',
'get_user_last_name'
)
raw_id_fields = ('user',)
search_fields = (
'user__username',
'user__email',
'user__last_name',
'user__first_name'
)
list_filter = (
'user__is_active',
'user__is_staff',
)
class RegistrationProfileAdmin(RegistrationAdmin):
def user_email(self, obj):
return obj.user.email
user_email.short_description = _("email")
user_email.admin_order_field = "user__email"
def user_date_joined(self, obj):
return obj.user.date_joined
user_date_joined.short_description = _("date joined")
user_date_joined.admin_order_field = "user__date_joined"
def user_last_login(self, obj):
return obj.user.last_login
user_last_login.short_description = _("last login")
user_last_login.admin_order_field = "user__last_login"
date_hierarchy = "user__date_joined"
list_display = (
"user",
"user_email",
"activated",
"activation_key_expired",
"user_date_joined",
"user_last_login",
)
list_filter = (
"user__is_active",
"user__is_staff",
"user__last_login",
)
admin.site.unregister(RegistrationProfile)
admin.site.register(RegistrationProfile, RegistrationProfileAdmin)
from django.contrib.sessions.models import Session
from django.contrib.auth.models import User
@admin.register(Session)
class SessionAdmin(admin.ModelAdmin):
def _session_data(self, obj):
_session_data = u''
decoded = obj.get_decoded()
for key in sorted(decoded):
_session_data += u'<b>' + key + u'</b>: ' + decoded.get(key)
if '_auth_user_id' == key:
try:
user = User.objects.get(id=decoded.get(key))
_session_data += u' (' + user.username + ')'
except Exception as e:
pass
_session_data += u'<br/>'
return format_html(_session_data)
_session_data.allow_tags = True
list_display = ['session_key', '_session_data', 'expire_date']
readonly_fields = ['_session_data']
exclude = ['session_data']
date_hierarchy = 'expire_date'
ordering = ['expire_date']
|
# Create your views here.
from django.conf import settings
from rest_framework import viewsets
from rest_framework.renderers import JSONRenderer
from accounts.serializers import UserSerializer, UserExistsSerializer
from django.contrib.auth.models import User
from django.views.generic.detail import DetailView
from django.core.urlresolvers import reverse_lazy
from guardian.mixins import LoginRequiredMixin
from django.views.generic.base import RedirectView
from django.views.generic.edit import DeleteView
from rest_framework.exceptions import PermissionDenied
from accounts import forms
from django.views.generic.edit import ModelFormMixin
from registration.backends.default.views import RegistrationView as BaseRegistrationView
from django.contrib.sites.models import Site
from django.contrib.sites.requests import RequestSite
from registration import signals
from django.template.loader import render_to_string
from registration.models import RegistrationProfile
from django.core.mail import EmailMultiAlternatives
from warnings import warn
from rest_framework.generics import RetrieveAPIView
from rest_framework import filters, permissions
class UserViewSet(viewsets.ModelViewSet):
"""
User view for list/create/retrieve/readMe/update/delete of user account in JSON format only
list: for authenticated user
create: deny to create user via api, use web interface only
retrieve:for authenticated user
readMe: for authenticated user
update: for authenticated user and owner only
delete: deny to delete user via api, use web interface only
"""
model = User
queryset = User.objects.all()
renderer_classes = [JSONRenderer]
serializer_class = UserSerializer
lookup_field = 'username'
lookup_value_regex = '[^/]*'
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
search_fields = ('username', 'email')
ordering_fields = ('username', 'email')
ordering = ('username',)
def create(self, request, *args, **kwargs):
raise PermissionDenied
def readMe(self, request, *args, **kwargs):
kwargs[self.lookup_field] = request.user.username
self.kwargs[self.lookup_field] = request.user.username
return super(UserViewSet, self).retrieve(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
if kwargs[self.lookup_field] != request.user.username:
raise PermissionDenied
return super(UserViewSet, self).update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
if kwargs[self.lookup_field] != request.user.username:
raise PermissionDenied
return super(UserViewSet, self).delete(request, *args, **kwargs)
class UserExists(RetrieveAPIView):
model = User
renderer_classes = [JSONRenderer]
serializer_class = UserExistsSerializer
lookup_field = 'username'
permission_classes = ()
class UserRead(LoginRequiredMixin, DetailView):
"""
User view to display user details in HTML format only
"""
model = User
template_name = 'read.html'
slug_field = 'username'
return_403 = True
class UserReadMe(LoginRequiredMixin, RedirectView):
"""
User view to display current login user details in HTML format only
"""
def get_redirect_url(self, **kwargs):
ret = reverse_lazy('read', args=[self.request.user.username])
return ret
class UserDelete(LoginRequiredMixin, ModelFormMixin, DeleteView):
"""
User view to create user account in HTML format only
"""
model = User
slug_field = 'username'
form_class = forms.UserDelete
success_url = reverse_lazy('index')
return_403 = True
def get_context_data(self, **kwargs):
context = super(UserDelete, self).get_context_data(**kwargs)
context['form'] = self.get_form(self.get_form_class())
return context
def dispatch(self, request, *args, **kwargs):
if request.user.username != kwargs['slug']:
raise PermissionDenied
return super(UserDelete, self).dispatch(request, *args, **kwargs)
class RegistrationView(BaseRegistrationView):
"""
Revised RegistrationView of django-registration to support html email
A registration backend which follows a simple workflow:
1. User signs up, inactive account is created.
2. Email is sent to user with activation link.
3. User clicks activation link, account is now active.
Using this backend requires that
* ``registration`` be listed in the ``INSTALLED_APPS`` setting
(since this backend makes use of models defined in this
application).
* The setting ``ACCOUNT_ACTIVATION_DAYS`` be supplied, specifying
(as an integer) the number of days from registration during
which a user may activate their account (after that period
expires, activation will be disallowed).
* The creation of the templates
``registration/activation_email_subject.txt`` and
``registration/activation_email.txt``, which will be used for
the activation email. See the notes for this backends
``register`` method for details regarding these templates.
Additionally, registration can be temporarily closed by adding the
setting ``REGISTRATION_OPEN`` and setting it to
``False``. Omitting this setting, or setting it to ``True``, will
be interpreted as meaning that registration is currently open and
permitted.
Internally, this is accomplished via storing an activation key in
an instance of ``registration.models.RegistrationProfile``. See
that model and its custom manager for full documentation of its
fields and supported operations.
"""
def register(self, request, **cleaned_data):
"""
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
An email will be sent to the supplied email address; this
email should contain an activation link. The email will be
rendered using two templates. See the documentation for
``RegistrationProfile.send_activation_email()`` for
information about these templates and the contexts provided to
them.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
"""
username, email, password = cleaned_data['username'], cleaned_data['email'], cleaned_data['password1']
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
new_user = RegistrationProfile.objects.create_inactive_user(username.lower(), email,
password, site,
False)
profile = new_user.registrationprofile_set.all()[0]
self.send_activation_email(site, profile)
signals.user_registered.send(sender=self.__class__,
user=new_user,
request=request)
return new_user
def send_activation_email(self, site, profile):
user = profile.user
ctx_dict = {
'activation_key': profile.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site,
'email': user.email,
'serverurl': settings.SERVERURL,
'domain': self.request.get_host(),
'protocol': 'https' if self.request.is_secure() else 'http'
}
# Email subject *must not* contain newlines
subject = ''.join(\
render_to_string('registration/activation_email_subject.txt',
ctx_dict).splitlines())
from_email = settings.DEFAULT_FROM_EMAIL
to_email = user.email
text_content = render_to_string('registration/activation_email.txt',
ctx_dict)
try:
html_content = render_to_string('registration/activation_email.html',
ctx_dict)
except:
# If any error occurs during html preperation do not add html content
# This is here to make sure when we switch from default backend to extended
# we do not get any missing here
html_content = None
# XXX we should not catch all exception for this
warn('registration/activation_email.html template cannot be rendered. Make sure you have it to send HTML messages. Will send email as TXT')
msg = EmailMultiAlternatives(subject,
text_content,
from_email,
[to_email])
if html_content:
msg.attach_alternative(html_content, "text/html")
msg.send()
upgrade rest_framework
# Create your views here.
from django.conf import settings
from rest_framework import viewsets
from rest_framework.renderers import JSONRenderer
from accounts.serializers import UserSerializer, UserExistsSerializer
from django.contrib.auth.models import User
from django.views.generic.detail import DetailView
from django.core.urlresolvers import reverse_lazy
from guardian.mixins import LoginRequiredMixin
from django.views.generic.base import RedirectView
from django.views.generic.edit import DeleteView
from rest_framework.exceptions import PermissionDenied
from accounts import forms
from django.views.generic.edit import ModelFormMixin
from registration.backends.default.views import RegistrationView as BaseRegistrationView
from django.contrib.sites.models import Site
from django.contrib.sites.requests import RequestSite
from registration import signals
from django.template.loader import render_to_string
from registration.models import RegistrationProfile
from django.core.mail import EmailMultiAlternatives
from warnings import warn
from rest_framework.generics import RetrieveAPIView
from rest_framework import filters, permissions, decorators
from oauth2_provider.ext.rest_framework import OAuth2Authentication
from registration.backends.hmac import views
from django.contrib.sites.shortcuts import get_current_site
class UserViewSet(viewsets.ReadOnlyModelViewSet):
model = User
queryset = User.objects.all()
renderer_classes = [JSONRenderer]
serializer_class = UserSerializer
lookup_field = 'username'
permission_classes = (permissions.AllowAny,)
lookup_value_regex = '[^/]*'
filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
search_fields = ('username', 'email')
ordering_fields = ('username', 'email')
ordering = ('username',)
class UserMe(RetrieveAPIView):
model = User
renderer_classes = [JSONRenderer]
serializer_classes = UserSerializer
lookup_field = 'username'
authentication_classes = (OAuth2Authentication,)
permission_classes = (permissions.IsAuthenticated,)
def retrieve(self, request, *args, **kwargs):
kwargs[self.lookup_field] = request.user.username
self.kwargs[self.lookup_field] = request.user.username
class UserRead(LoginRequiredMixin, DetailView):
"""
User view to display user details in HTML format only
"""
model = User
template_name = 'read.html'
slug_field = 'username'
return_403 = True
class UserReadMe(LoginRequiredMixin, RedirectView):
"""
User view to display current login user details in HTML format only
"""
def get_redirect_url(self, **kwargs):
ret = reverse_lazy('read', args=[self.request.user.username])
return ret
class UserDelete(LoginRequiredMixin, ModelFormMixin, DeleteView):
"""
User view to create user account in HTML format only
"""
model = User
slug_field = 'username'
form_class = forms.UserDelete
success_url = reverse_lazy('index')
return_403 = True
def get_context_data(self, **kwargs):
context = super(UserDelete, self).get_context_data(**kwargs)
context['form'] = self.get_form(self.get_form_class())
return context
def dispatch(self, request, *args, **kwargs):
if request.user.username != kwargs['slug']:
raise PermissionDenied
return super(UserDelete, self).dispatch(request, *args, **kwargs)
class RegistrationView(views.RegistrationView):
def register(self, form):
new_user = self.create_inactive_user(form)
self.send_activation_email(new_user)
return new_user
def send_activation_email(self, user):
site = get_current_site(self.request)
ctx_dict = {
'activation_key': self.get_activation_key(user),
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site,
'email': user.email,
'serverurl': settings.SERVERURL,
'domain': self.request.get_host(),
'protocol': 'https' if self.request.is_secure() else 'http'
}
# Email subject *must not* contain newlines
subject = ''.join(\
render_to_string('registration/activation_email_subject.txt',
ctx_dict).splitlines())
from_email = settings.DEFAULT_FROM_EMAIL
to_email = user.email
text_content = render_to_string('registration/activation_email.txt',
ctx_dict)
try:
html_content = render_to_string('registration/activation_email.html',
ctx_dict)
except:
# If any error occurs during html preperation do not add html content
# This is here to make sure when we switch from default backend to extended
# we do not get any missing here
html_content = None
# XXX we should not catch all exception for this
warn('registration/activation_email.html template cannot be rendered. Make sure you have it to send HTML messages. Will send email as TXT')
msg = EmailMultiAlternatives(subject,
text_content,
from_email,
[to_email])
if html_content:
msg.attach_alternative(html_content, "text/html")
msg.send()
|
# Gigapixel controller
# This must run as root (sudo python gigapixel.py) due to framebuffer, etc.
#
# http://www.adafruit.com/products/998 (Raspberry Pi Model B)
# http://www.adafruit.com/products/1601 (PiTFT Mini Kit)
#
# Prerequisite tutorials: aside from the basic Raspbian setup and PiTFT setup
# http://learn.adafruit.com/adafruit-pitft-28-inch-resistive-touchscreen-display-raspberry-pi
#
# gigapixel.py by Anand Kadiyala
# based on lapse.py by David Hunt
# BSD license, all text above must be included in any redistribution.
import wiringpi2
import atexit
import cPickle as pickle
import errno
import fnmatch
import io
import os
import pygame
import threading
import signal
import sys
from pygame.locals import *
from subprocess import call
from time import sleep
from datetime import datetime, timedelta
# UI classes ---------------------------------------------------------------
# Icon is a very simple bitmap class, just associates a name and a pygame
# image (PNG loaded from icons directory) for each.
# There isn't a globally-declared fixed list of Icons. Instead, the list
# is populated at runtime from the contents of the 'icons' directory.
class Icon:
def __init__(self, name):
self.name = name
try:
self.bitmap = pygame.image.load(iconPath + '/' + name + '.png')
except:
pass
# Button is a simple tappable screen region. Each has:
# - bounding rect ((X,Y,W,H) in pixels)
# - optional background color and/or Icon (or None), always centered
# - optional foreground Icon, always centered
# - optional single callback function
# - optional single value passed to callback
# Occasionally Buttons are used as a convenience for positioning Icons
# but the taps are ignored. Stacking order is important; when Buttons
# overlap, lowest/first Button in list takes precedence when processing
# input, and highest/last Button is drawn atop prior Button(s). This is
# used, for example, to center an Icon by creating a passive Button the
# width of the full screen, but with other buttons left or right that
# may take input precedence (e.g. the Effect labels & buttons).
# After Icons are loaded at runtime, a pass is made through the global
# buttons[] list to assign the Icon objects (from names) to each Button.
class Button:
def __init__(self, rect, **kwargs):
self.rect = rect # Bounds
self.color = None # Background fill color, if any
self.iconBg = None # Background Icon (atop color fill)
self.iconFg = None # Foreground Icon (atop background)
self.bg = None # Background Icon name
self.fg = None # Foreground Icon name
self.callback = None # Callback function
self.value = None # Value passed to callback
for key, value in kwargs.iteritems():
if key == 'color': self.color = value
elif key == 'bg' : self.bg = value
elif key == 'fg' : self.fg = value
elif key == 'cb' : self.callback = value
elif key == 'value': self.value = value
def selected(self, pos):
x1 = self.rect[0]
y1 = self.rect[1]
x2 = x1 + self.rect[2] - 1
y2 = y1 + self.rect[3] - 1
if ((pos[0] >= x1) and (pos[0] <= x2) and
(pos[1] >= y1) and (pos[1] <= y2)):
if self.callback:
if self.value is None: self.callback()
else: self.callback(self.value)
return True
return False
def draw(self, screen):
if self.color:
screen.fill(self.color, self.rect)
if self.iconBg:
screen.blit(self.iconBg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconBg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconBg.bitmap.get_height())/2))
if self.iconFg:
screen.blit(self.iconFg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconFg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconFg.bitmap.get_height())/2))
def setBg(self, name):
if name is None:
self.iconBg = None
else:
for i in icons:
if name == i.name:
self.iconBg = i
break
# UI callbacks -------------------------------------------------------------
# These are defined before globals because they're referenced by items in
# the global buttons[] list.
def motorCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
global motorRunning
global motorDirection
global motorpin
global motorpinA
global motorpinB
global motorpinC
global motorpinD
if n == 1:
motorDirection = 1
motorpin = motorpinA
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
elif n == 2:
motorDirection = 0
motorpin = motorpinB
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
elif n == 3:
motorDirection = 0
motorpin = motorpinC
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
elif n == 4:
motorDirection = 0
motorpin = motorpinD
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
def numericCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
global numberstring
if n < 10:
numberstring = numberstring + str(n)
elif n == 10:
numberstring = numberstring[:-1]
elif n == 11:
screenMode = 1
elif n == 12:
screenMode = returnScreen
numeric = int(numberstring)
v[dict_idx] = numeric
def settingCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
screenMode += n
if screenMode < 1: screenMode = len(buttons) - 1
elif screenMode >= len(buttons): screenMode = 1
def valuesCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
global returnScreen
global numberstring
global numeric
global v
global dict_idx
if n == -1:
screenMode = 0
saveSettings()
if n == 1:
dict_idx='Pulse'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
elif n == 2:
dict_idx='Interval'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
elif n == 3:
dict_idx='Images'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
elif n == 4:
dict_idx='Directions'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
def viewCallback(n): # Viewfinder buttons
global screenMode, screenModePrior
if n is 0: # Gear icon
screenMode = 1
def doneCallback(): # Exit settings
global screenMode
if screenMode > 0:
saveSettings()
screenMode = 0 # Switch back to main window
def startCallback(n): # start/Stop the timelapse thread
global t, busy, threadExited
global currentframe
if n == 1:
if busy == False:
if (threadExited == True):
# Re-instanciate the object for the next start
t = threading.Thread(target=timeLapse)
threadExited = False
t.start()
if n == 0:
if busy == True:
busy = False
t.join()
currentframe = 0
# Re-instanciate the object for the next time around.
t = threading.Thread(target=timeLapse)
def timeLapse():
global v
global settling_time
global shutter_length
global motorpin
global shutterpin
global backlightpin
global busy, threadExited
global currentframe
busy = True
for i in range( 1 , v['Images'] + 1 ):
if busy == False:
break
currentframe = i
gpio.digitalWrite(motorpin,gpio.HIGH)
pulse = float(v['Pulse'])/1000.0
sleep(pulse)
gpio.digitalWrite(motorpin,gpio.LOW)
sleep(settling_time)
# disable the backlight, critical for night timelapses, also saves power
os.system("echo '0' > /sys/class/gpio/gpio252/value")
gpio.digitalWrite(shutterpin,gpio.HIGH)
sleep(shutter_length)
gpio.digitalWrite(shutterpin,gpio.LOW)
# enable the backlight
os.system("echo '1' > /sys/class/gpio/gpio252/value")
interval = float(v['Interval'])/1000.0
if (interval > shutter_length):
sleep(interval - shutter_length)
currentframe = 0
busy = False
threadExited = True
def signal_handler(signal, frame):
print 'got SIGTERM'
pygame.quit()
sys.exit()
# Global stuff -------------------------------------------------------------
t = threading.Thread(target=timeLapse)
busy = False
threadExited = False
screenMode = 0 # Current screen mode; default = viewfinder
screenModePrior = -1 # Prior screen mode (for detecting changes)
iconPath = 'icons' # Subdirectory containing UI bitmaps (PNG format)
numeric = 0 # number from numeric keypad
numberstring = "0"
motorRunning = 0
motorDirection = 0
returnScreen = 0
shutterpin = 17
motorpinA = 18
motorpinB = 27
motorpinC = 22
motorpinD = 23
motorpin = motorpinA
backlightpin = 252
currentframe = 0
framecount = 100
settling_time = 0.2
shutter_length = 0.2
interval_delay = 0.2
dict_idx = "Interval"
v = { "Pulse": 100,
"Interval": 3000,
"Images": 150}
icons = [] # This list gets populated at startup
# buttons[] is a list of lists; each top-level list element corresponds
# to one screen mode (e.g. viewfinder, image playback, storage settings),
# and each element within those lists corresponds to one UI button.
# There's a little bit of repetition (e.g. prev/next buttons are
# declared for each settings screen, rather than a single reusable
# set); trying to reuse those few elements just made for an ugly
# tangle of code elsewhere.
buttons = [
# Screen mode 0 is main view screen of current status
[Button(( 5,180,120, 60), bg='start', cb=startCallback, value=1),
Button((130,180, 60, 60), bg='cog', cb=viewCallback, value=0),
Button((195,180,120, 60), bg='stop', cb=startCallback, value=0)],
# Screen 1 for changing values and setting motor direction
[Button((260, 0, 60, 60), bg='cog', cb=valuesCallback, value=1),
Button((260, 60, 60, 60), bg='cog', cb=valuesCallback, value=2),
Button((260,120, 60, 60), bg='cog', cb=valuesCallback, value=3),
Button(( 0,180,160, 60), bg='ok', cb=valuesCallback, value=-1),
Button((200,180, 60, 60), bg='directions', cb=valuesCallback, value=4)],
# Screen 2 for numeric input
[Button(( 0, 0,320, 60), bg='box'),
Button((180,120, 60, 60), bg='0', cb=numericCallback, value=0),
Button(( 0,180, 60, 60), bg='1', cb=numericCallback, value=1),
Button((120,180, 60, 60), bg='3', cb=numericCallback, value=3),
Button(( 60,180, 60, 60), bg='2', cb=numericCallback, value=2),
Button(( 0,120, 60, 60), bg='4', cb=numericCallback, value=4),
Button(( 60,120, 60, 60), bg='5', cb=numericCallback, value=5),
Button((120,120, 60, 60), bg='6', cb=numericCallback, value=6),
Button(( 0, 60, 60, 60), bg='7', cb=numericCallback, value=7),
Button(( 60, 60, 60, 60), bg='8', cb=numericCallback, value=8),
Button((120, 60, 60, 60), bg='9', cb=numericCallback, value=9),
Button((240,120, 80, 60), bg='del', cb=numericCallback, value=10),
Button((180,180,140, 60), bg='ok', cb=numericCallback, value=12),
Button((180, 60,140, 60), bg='cancel',cb=numericCallback, value=11)]
# Screen 3 for setting motor direction
]
# Assorted utility functions -----------------------------------------------
def saveSettings():
global v
try:
outfile = open('lapse.pkl', 'wb')
# Use a dictionary (rather than pickling 'raw' values) so
# the number & order of things can change without breaking.
pickle.dump(v, outfile)
outfile.close()
except:
pass
def loadSettings():
global v
try:
infile = open('lapse.pkl', 'rb')
v = pickle.load(infile)
infile.close()
except:
pass
# Initialization -----------------------------------------------------------
# Init framebuffer/touchscreen environment variables
os.putenv('SDL_VIDEODRIVER', 'fbcon')
os.putenv('SDL_FBDEV' , '/dev/fb1')
os.putenv('SDL_MOUSEDRV' , 'TSLIB')
os.putenv('SDL_MOUSEDEV' , '/dev/input/touchscreen')
# Init pygame and screen
print "Initting..."
pygame.init()
print "Setting Mouse invisible..."
pygame.mouse.set_visible(False)
print "Setting fullscreen..."
modes = pygame.display.list_modes(16)
screen = pygame.display.set_mode(modes[0], FULLSCREEN, 16)
print "Loading Icons..."
# Load all icons at startup.
for file in os.listdir(iconPath):
if fnmatch.fnmatch(file, '*.png'):
icons.append(Icon(file.split('.')[0]))
# Assign Icons to Buttons, now that they're loaded
print"Assigning Buttons"
for s in buttons: # For each screenful of buttons...
for b in s: # For each button on screen...
for i in icons: # For each icon...
if b.bg == i.name: # Compare names; match?
b.iconBg = i # Assign Icon to Button
b.bg = None # Name no longer used; allow garbage collection
if b.fg == i.name:
b.iconFg = i
b.fg = None
# Set up GPIO pins
print "Init GPIO pins..."
gpio = wiringpi2.GPIO(wiringpi2.GPIO.WPI_MODE_GPIO)
gpio.pinMode(shutterpin,gpio.OUTPUT)
gpio.pinMode(motorpinA,gpio.OUTPUT)
gpio.pinMode(motorpinB,gpio.OUTPUT)
gpio.pinMode(motorpinC,gpio.OUTPUT)
gpio.pinMode(motorpinD,gpio.OUTPUT)
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
# I couldnt seem to get at pin 252 for the backlight using the usual method above,
# but this seems to work
os.system("echo 252 > /sys/class/gpio/export")
os.system("echo 'out' > /sys/class/gpio/gpio252/direction")
os.system("echo '1' > /sys/class/gpio/gpio252/value")
print"Load Settings"
loadSettings() # Must come last; fiddles with Button/Icon states
print "loading background.."
img = pygame.image.load("icons/GigapixelPi.png")
if img is None or img.get_height() < 240: # Letterbox, clear background
screen.fill(0)
if img:
screen.blit(img,
((320 - img.get_width() ) / 2,
(240 - img.get_height()) / 2))
pygame.display.update()
sleep(2)
# Main loop ----------------------------------------------------------------
signal.signal(signal.SIGTERM, signal_handler)
print "mainloop.."
while(True):
# Process touchscreen input
while True:
for event in pygame.event.get():
if(event.type is MOUSEBUTTONDOWN):
pos = pygame.mouse.get_pos()
for b in buttons[screenMode]:
if b.selected(pos): break
elif(event.type is MOUSEBUTTONUP):
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
if screenMode >= 0 or screenMode != screenModePrior: break
if img is None or img.get_height() < 240: # Letterbox, clear background
screen.fill(0)
if img:
screen.blit(img,
((320 - img.get_width() ) / 2,
(240 - img.get_height()) / 2))
# Overlay buttons on display and update
for i,b in enumerate(buttons[screenMode]):
b.draw(screen)
if screenMode == 2:
myfont = pygame.font.SysFont("Arial", 50)
label = myfont.render(numberstring, 1, (255,255,255))
screen.blit(label, (10, 2))
if screenMode == 1:
myfont = pygame.font.SysFont("Arial", 30)
label = myfont.render("Pulse:" , 1, (255,255,255))
screen.blit(label, (10, 10))
label = myfont.render("Interval:" , 1, (255,255,255))
screen.blit(label, (10, 70))
label = myfont.render("Frames:" , 1, (255,255,255))
screen.blit(label, (10,130))
label = myfont.render(str(v['Pulse']) + "ms" , 1, (255,255,255))
screen.blit(label, (130, 10))
label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
screen.blit(label, (130, 70))
label = myfont.render(str(v['Images']) , 1, (255,255,255))
screen.blit(label, (130,130))
if screenMode == 0:
myfont = pygame.font.SysFont("Arial", 30)
label = myfont.render("Pulse:" , 1, (255,255,255))
screen.blit(label, (10, 10))
label = myfont.render("Interval:" , 1, (255,255,255))
screen.blit(label, (10, 50))
label = myfont.render("Frames:" , 1, (255,255,255))
screen.blit(label, (10, 90))
label = myfont.render("Remaining:" , 1, (255,255,255))
screen.blit(label, (10,130))
label = myfont.render(str(v['Pulse']) + "ms" , 1, (255,255,255))
screen.blit(label, (160, 10))
label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
screen.blit(label, (160, 50))
label = myfont.render(str(currentframe) + " of " + str(v['Images']) , 1, (255,255,255))
screen.blit(label, (160, 90))
intervalLength = float((v['Pulse'] + v['Interval'] + (settling_time*1000) + (shutter_length*1000)))
remaining = float((intervalLength * (v['Images'] - currentframe)) / 1000)
sec = timedelta(seconds=int(remaining))
d = datetime(1,1,1) + sec
remainingStr = "%dh%dm%ds" % (d.hour, d.minute, d.second)
label = myfont.render(remainingStr , 1, (255,255,255))
screen.blit(label, (160, 130))
pygame.display.update()
screenModePrior = screenMode
screen
# Gigapixel controller
# This must run as root (sudo python gigapixel.py) due to framebuffer, etc.
#
# http://www.adafruit.com/products/998 (Raspberry Pi Model B)
# http://www.adafruit.com/products/1601 (PiTFT Mini Kit)
#
# Prerequisite tutorials: aside from the basic Raspbian setup and PiTFT setup
# http://learn.adafruit.com/adafruit-pitft-28-inch-resistive-touchscreen-display-raspberry-pi
#
# gigapixel.py by Anand Kadiyala
# based on lapse.py by David Hunt
# BSD license, all text above must be included in any redistribution.
import wiringpi2
import atexit
import cPickle as pickle
import errno
import fnmatch
import io
import os
import pygame
import threading
import signal
import sys
from pygame.locals import *
from subprocess import call
from time import sleep
from datetime import datetime, timedelta
# UI classes ---------------------------------------------------------------
# Icon is a very simple bitmap class, just associates a name and a pygame
# image (PNG loaded from icons directory) for each.
# There isn't a globally-declared fixed list of Icons. Instead, the list
# is populated at runtime from the contents of the 'icons' directory.
class Icon:
def __init__(self, name):
self.name = name
try:
self.bitmap = pygame.image.load(iconPath + '/' + name + '.png')
except:
pass
# Button is a simple tappable screen region. Each has:
# - bounding rect ((X,Y,W,H) in pixels)
# - optional background color and/or Icon (or None), always centered
# - optional foreground Icon, always centered
# - optional single callback function
# - optional single value passed to callback
# Occasionally Buttons are used as a convenience for positioning Icons
# but the taps are ignored. Stacking order is important; when Buttons
# overlap, lowest/first Button in list takes precedence when processing
# input, and highest/last Button is drawn atop prior Button(s). This is
# used, for example, to center an Icon by creating a passive Button the
# width of the full screen, but with other buttons left or right that
# may take input precedence (e.g. the Effect labels & buttons).
# After Icons are loaded at runtime, a pass is made through the global
# buttons[] list to assign the Icon objects (from names) to each Button.
class Button:
def __init__(self, rect, **kwargs):
self.rect = rect # Bounds
self.color = None # Background fill color, if any
self.iconBg = None # Background Icon (atop color fill)
self.iconFg = None # Foreground Icon (atop background)
self.bg = None # Background Icon name
self.fg = None # Foreground Icon name
self.callback = None # Callback function
self.value = None # Value passed to callback
for key, value in kwargs.iteritems():
if key == 'color': self.color = value
elif key == 'bg' : self.bg = value
elif key == 'fg' : self.fg = value
elif key == 'cb' : self.callback = value
elif key == 'value': self.value = value
def selected(self, pos):
x1 = self.rect[0]
y1 = self.rect[1]
x2 = x1 + self.rect[2] - 1
y2 = y1 + self.rect[3] - 1
if ((pos[0] >= x1) and (pos[0] <= x2) and
(pos[1] >= y1) and (pos[1] <= y2)):
if self.callback:
if self.value is None: self.callback()
else: self.callback(self.value)
return True
return False
def draw(self, screen):
if self.color:
screen.fill(self.color, self.rect)
if self.iconBg:
screen.blit(self.iconBg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconBg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconBg.bitmap.get_height())/2))
if self.iconFg:
screen.blit(self.iconFg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconFg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconFg.bitmap.get_height())/2))
def setBg(self, name):
if name is None:
self.iconBg = None
else:
for i in icons:
if name == i.name:
self.iconBg = i
break
# UI callbacks -------------------------------------------------------------
# These are defined before globals because they're referenced by items in
# the global buttons[] list.
def motorCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
global motorRunning
global motorDirection
global motorpin
global motorpinA
global motorpinB
global motorpinC
global motorpinD
if n == 1:
motorDirection = 1
motorpin = motorpinA
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
elif n == 2:
motorDirection = 0
motorpin = motorpinB
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
elif n == 3:
motorDirection = 0
motorpin = motorpinC
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
elif n == 4:
motorDirection = 0
motorpin = motorpinD
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
def numericCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
global numberstring
if n < 10:
numberstring = numberstring + str(n)
elif n == 10:
numberstring = numberstring[:-1]
elif n == 11:
screenMode = 1
elif n == 12:
screenMode = returnScreen
numeric = int(numberstring)
v[dict_idx] = numeric
def settingCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
screenMode += n
if screenMode < 1: screenMode = len(buttons) - 1
elif screenMode >= len(buttons): screenMode = 1
def valuesCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
global returnScreen
global numberstring
global numeric
global v
global dict_idx
if n == -1:
screenMode = 0
saveSettings()
if n == 1:
dict_idx='Pulse'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
elif n == 2:
dict_idx='Interval'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
elif n == 3:
dict_idx='Images'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
elif n == 4:
dict_idx='Directions'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
def viewCallback(n): # Viewfinder buttons
global screenMode, screenModePrior
if n is 0: # Gear icon
screenMode = 1
def doneCallback(): # Exit settings
global screenMode
if screenMode > 0:
saveSettings()
screenMode = 0 # Switch back to main window
def startCallback(n): # start/Stop the timelapse thread
global t, busy, threadExited
global currentframe
if n == 1:
if busy == False:
if (threadExited == True):
# Re-instanciate the object for the next start
t = threading.Thread(target=timeLapse)
threadExited = False
t.start()
if n == 0:
if busy == True:
busy = False
t.join()
currentframe = 0
# Re-instanciate the object for the next time around.
t = threading.Thread(target=timeLapse)
def timeLapse():
global v
global settling_time
global shutter_length
global motorpin
global shutterpin
global backlightpin
global busy, threadExited
global currentframe
busy = True
for i in range( 1 , v['Images'] + 1 ):
if busy == False:
break
currentframe = i
gpio.digitalWrite(motorpin,gpio.HIGH)
pulse = float(v['Pulse'])/1000.0
sleep(pulse)
gpio.digitalWrite(motorpin,gpio.LOW)
sleep(settling_time)
# disable the backlight, critical for night timelapses, also saves power
os.system("echo '0' > /sys/class/gpio/gpio252/value")
gpio.digitalWrite(shutterpin,gpio.HIGH)
sleep(shutter_length)
gpio.digitalWrite(shutterpin,gpio.LOW)
# enable the backlight
os.system("echo '1' > /sys/class/gpio/gpio252/value")
interval = float(v['Interval'])/1000.0
if (interval > shutter_length):
sleep(interval - shutter_length)
currentframe = 0
busy = False
threadExited = True
def signal_handler(signal, frame):
print 'got SIGTERM'
pygame.quit()
sys.exit()
# Global stuff -------------------------------------------------------------
t = threading.Thread(target=timeLapse)
busy = False
threadExited = False
screenMode = 0 # Current screen mode; default = viewfinder
screenModePrior = -1 # Prior screen mode (for detecting changes)
iconPath = 'icons' # Subdirectory containing UI bitmaps (PNG format)
numeric = 0 # number from numeric keypad
numberstring = "0"
motorRunning = 0
motorDirection = 0
returnScreen = 0
shutterpin = 17
motorpinA = 18
motorpinB = 27
motorpinC = 22
motorpinD = 23
motorpin = motorpinA
backlightpin = 252
currentframe = 0
framecount = 100
settling_time = 0.2
shutter_length = 0.2
interval_delay = 0.2
dict_idx = "Interval"
v = { "Pulse": 100,
"Interval": 3000,
"Images": 150}
icons = [] # This list gets populated at startup
# buttons[] is a list of lists; each top-level list element corresponds
# to one screen mode (e.g. viewfinder, image playback, storage settings),
# and each element within those lists corresponds to one UI button.
# There's a little bit of repetition (e.g. prev/next buttons are
# declared for each settings screen, rather than a single reusable
# set); trying to reuse those few elements just made for an ugly
# tangle of code elsewhere.
buttons = [
# Screen mode 0 is main view screen of current status
[Button(( 5,180,120, 60), bg='start', cb=startCallback, value=1),
Button((130,180, 60, 60), bg='cog', cb=viewCallback, value=0),
Button((195,180,120, 60), bg='stop', cb=startCallback, value=0)],
# Screen 1 for changing values and setting motor direction
[Button((260, 0, 60, 60), bg='cog', cb=valuesCallback, value=1),
Button((260, 60, 60, 60), bg='cog', cb=valuesCallback, value=2),
Button((260,120, 60, 60), bg='cog', cb=valuesCallback, value=3),
Button(( 0,180,160, 60), bg='ok', cb=valuesCallback, value=-1),
Button((200,180, 60, 60), bg='directions', cb=valuesCallback, value=4)],
# Screen 2 for numeric input
[Button(( 0, 0,320, 60), bg='box'),
Button((180,120, 60, 60), bg='0', cb=numericCallback, value=0),
Button(( 0,180, 60, 60), bg='1', cb=numericCallback, value=1),
Button((120,180, 60, 60), bg='3', cb=numericCallback, value=3),
Button(( 60,180, 60, 60), bg='2', cb=numericCallback, value=2),
Button(( 0,120, 60, 60), bg='4', cb=numericCallback, value=4),
Button(( 60,120, 60, 60), bg='5', cb=numericCallback, value=5),
Button((120,120, 60, 60), bg='6', cb=numericCallback, value=6),
Button(( 0, 60, 60, 60), bg='7', cb=numericCallback, value=7),
Button(( 60, 60, 60, 60), bg='8', cb=numericCallback, value=8),
Button((120, 60, 60, 60), bg='9', cb=numericCallback, value=9),
Button((240,120, 80, 60), bg='del', cb=numericCallback, value=10),
Button((180,180,140, 60), bg='ok', cb=numericCallback, value=12),
Button((180, 60,140, 60), bg='cancel',cb=numericCallback, value=11)],
# Screen 3 for setting motor direction
[Button((180,120, 60, 60), bg='left', cb=numericCallback, value=0),
Button(( 0,180, 60, 60), bg='right', cb=numericCallback, value=1),
Button((120,180, 60, 60), bg='up', cb=numericCallback, value=3),
Button(( 60,180, 60, 60), bg='down', cb=numericCallback, value=2),
Button(( 0,120, 60, 60), bg='4', cb=numericCallback, value=4),
Button(( 60,120, 60, 60), bg='5', cb=numericCallback, value=5),
Button((120,120, 60, 60), bg='6', cb=numericCallback, value=6),
Button(( 0, 60, 60, 60), bg='7', cb=numericCallback, value=7),
Button(( 60, 60, 60, 60), bg='8', cb=numericCallback, value=8),
Button((120, 60, 60, 60), bg='9', cb=numericCallback, value=9),
Button((240,120, 80, 60), bg='del', cb=numericCallback, value=10),
Button((180,180,140, 60), bg='ok', cb=numericCallback, value=12),
Button((180, 60,140, 60), bg='cancel',cb=numericCallback, value=11)]
]
# Assorted utility functions -----------------------------------------------
def saveSettings():
global v
try:
outfile = open('lapse.pkl', 'wb')
# Use a dictionary (rather than pickling 'raw' values) so
# the number & order of things can change without breaking.
pickle.dump(v, outfile)
outfile.close()
except:
pass
def loadSettings():
global v
try:
infile = open('lapse.pkl', 'rb')
v = pickle.load(infile)
infile.close()
except:
pass
# Initialization -----------------------------------------------------------
# Init framebuffer/touchscreen environment variables
os.putenv('SDL_VIDEODRIVER', 'fbcon')
os.putenv('SDL_FBDEV' , '/dev/fb1')
os.putenv('SDL_MOUSEDRV' , 'TSLIB')
os.putenv('SDL_MOUSEDEV' , '/dev/input/touchscreen')
# Init pygame and screen
print "Initting..."
pygame.init()
print "Setting Mouse invisible..."
pygame.mouse.set_visible(False)
print "Setting fullscreen..."
modes = pygame.display.list_modes(16)
screen = pygame.display.set_mode(modes[0], FULLSCREEN, 16)
print "Loading Icons..."
# Load all icons at startup.
for file in os.listdir(iconPath):
if fnmatch.fnmatch(file, '*.png'):
icons.append(Icon(file.split('.')[0]))
# Assign Icons to Buttons, now that they're loaded
print"Assigning Buttons"
for s in buttons: # For each screenful of buttons...
for b in s: # For each button on screen...
for i in icons: # For each icon...
if b.bg == i.name: # Compare names; match?
b.iconBg = i # Assign Icon to Button
b.bg = None # Name no longer used; allow garbage collection
if b.fg == i.name:
b.iconFg = i
b.fg = None
# Set up GPIO pins
print "Init GPIO pins..."
gpio = wiringpi2.GPIO(wiringpi2.GPIO.WPI_MODE_GPIO)
gpio.pinMode(shutterpin,gpio.OUTPUT)
gpio.pinMode(motorpinA,gpio.OUTPUT)
gpio.pinMode(motorpinB,gpio.OUTPUT)
gpio.pinMode(motorpinC,gpio.OUTPUT)
gpio.pinMode(motorpinD,gpio.OUTPUT)
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
# I couldnt seem to get at pin 252 for the backlight using the usual method above,
# but this seems to work
os.system("echo 252 > /sys/class/gpio/export")
os.system("echo 'out' > /sys/class/gpio/gpio252/direction")
os.system("echo '1' > /sys/class/gpio/gpio252/value")
print"Load Settings"
loadSettings() # Must come last; fiddles with Button/Icon states
print "loading background.."
img = pygame.image.load("icons/GigapixelPi.png")
if img is None or img.get_height() < 240: # Letterbox, clear background
screen.fill(0)
if img:
screen.blit(img,
((320 - img.get_width() ) / 2,
(240 - img.get_height()) / 2))
pygame.display.update()
sleep(2)
# Main loop ----------------------------------------------------------------
signal.signal(signal.SIGTERM, signal_handler)
print "mainloop.."
while(True):
# Process touchscreen input
while True:
for event in pygame.event.get():
if(event.type is MOUSEBUTTONDOWN):
pos = pygame.mouse.get_pos()
for b in buttons[screenMode]:
if b.selected(pos): break
elif(event.type is MOUSEBUTTONUP):
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
if screenMode >= 0 or screenMode != screenModePrior: break
if img is None or img.get_height() < 240: # Letterbox, clear background
screen.fill(0)
if img:
screen.blit(img,
((320 - img.get_width() ) / 2,
(240 - img.get_height()) / 2))
# Overlay buttons on display and update
for i,b in enumerate(buttons[screenMode]):
b.draw(screen)
if screenMode == 2:
myfont = pygame.font.SysFont("Arial", 50)
label = myfont.render(numberstring, 1, (255,255,255))
screen.blit(label, (10, 2))
if screenMode == 1:
myfont = pygame.font.SysFont("Arial", 30)
label = myfont.render("Pulse:" , 1, (255,255,255))
screen.blit(label, (10, 10))
label = myfont.render("Interval:" , 1, (255,255,255))
screen.blit(label, (10, 70))
label = myfont.render("Frames:" , 1, (255,255,255))
screen.blit(label, (10,130))
label = myfont.render(str(v['Pulse']) + "ms" , 1, (255,255,255))
screen.blit(label, (130, 10))
label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
screen.blit(label, (130, 70))
label = myfont.render(str(v['Images']) , 1, (255,255,255))
screen.blit(label, (130,130))
if screenMode == 0:
myfont = pygame.font.SysFont("Arial", 30)
label = myfont.render("Pulse:" , 1, (255,255,255))
screen.blit(label, (10, 10))
label = myfont.render("Interval:" , 1, (255,255,255))
screen.blit(label, (10, 50))
label = myfont.render("Frames:" , 1, (255,255,255))
screen.blit(label, (10, 90))
label = myfont.render("Remaining:" , 1, (255,255,255))
screen.blit(label, (10,130))
label = myfont.render(str(v['Pulse']) + "ms" , 1, (255,255,255))
screen.blit(label, (160, 10))
label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
screen.blit(label, (160, 50))
label = myfont.render(str(currentframe) + " of " + str(v['Images']) , 1, (255,255,255))
screen.blit(label, (160, 90))
intervalLength = float((v['Pulse'] + v['Interval'] + (settling_time*1000) + (shutter_length*1000)))
remaining = float((intervalLength * (v['Images'] - currentframe)) / 1000)
sec = timedelta(seconds=int(remaining))
d = datetime(1,1,1) + sec
remainingStr = "%dh%dm%ds" % (d.hour, d.minute, d.second)
label = myfont.render(remainingStr , 1, (255,255,255))
screen.blit(label, (160, 130))
pygame.display.update()
screenModePrior = screenMode
|
import json
import tweepy
import urllib
import urllib2
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.views.generic.simple import redirect_to
from annoying.decorators import ajax_request
from annoying.decorators import render_to
from accounts.models import TwitterInfo, UserProfile
from accounts.models import DeliciousInfo
from accounts.renderers import connection_table_renderer
from api.models import MuteList
from api.models import Tag
from api.models import WhiteListItem
from common.view_helpers import _template_values, JSONResponse
from common.helpers import put_profile_pic
from common.admin import email_templates
from eyebrowse.log import logger
from eyebrowse.settings import TWITTER_CONSUMER_KEY
from eyebrowse.settings import TWITTER_CONSUMER_SECRET
from eyebrowse.settings import DELICIOUS_CONSUMER_KEY
from eyebrowse.settings import DELICIOUS_CONSUMER_SECRET
from notifications.models import Notification, NoticeType, send_now
from common.management.commands.update_popular_history import Command
@login_required
@render_to('accounts/whitelist.html')
def whitelist(request):
"""
Edit whitelist entries
"""
whitelist = WhiteListItem.objects.filter(user=request.user)
return _template_values(request,
page_title="edit sharelist",
header="sharelist",
navbar='nav_account',
sub_navbar="subnav_whitelist",
whitelist=whitelist)
@login_required
@render_to('accounts/mutelist.html')
def mutelist(request):
"""
Edit mutelist entries
"""
mutelist = MuteList.objects.filter(user=request.user)
return _template_values(request,
page_title="edit mute list",
header="mutelist",
navbar='nav_account',
sub_navbar="subnav_mutelist",
mutelist=mutelist)
@login_required
@render_to('accounts/account.html')
def account(request):
"""
Edit account info
"""
user = request.user
if request.POST and request.is_ajax():
success = False
errors = {}
data = None
type = request.POST.get('form_type', None)
if type == 'account-info':
first_name = request.POST.get('first_name', '')
last_name = request.POST.get('last_name', '')
anon_email = request.POST.get('anon_checkbox', False) == 'True'
location = request.POST.get('location', '')
website = request.POST.get('website', '')
bio = request.POST.get('bio', '')
user.first_name = first_name
user.last_name = last_name
user.save()
profile = user.profile
profile.anon_email = anon_email
profile.location = location
profile.website = website
profile.bio = bio
profile.save()
success = "User info updated!"
elif type == 'pic':
pic_url = request.POST.get('pic_url')
# download and upload to our S3
pic_url = put_profile_pic(pic_url, user.profile)
if pic_url: # no errors/less than 1mb #patlsotw
user.profile.pic_url = pic_url
user.profile.save()
success = "Profile picture changed!"
else:
errors['pic'] = ['Oops -- something went wrong.']
resp = {
'success': success,
'errors': errors,
'type': type,
'data': data,
}
return JSONResponse(resp)
return _template_values(request,
page_title="edit sharelist",
header="account info",
navbar='nav_account',
sub_navbar="subnav_account_info")
@login_required
@render_to('accounts/connections.html')
def connections(request):
"""
Edit connection (following/followers)
"""
following = request.user.profile.follows.all()
followers = request.user.profile.followed_by.all()
rendered_following = connection_table_renderer(
following, 'following', following)
rendered_followers = connection_table_renderer(
followers, 'followers', following)
template_dict = {
"rendered_followers": rendered_followers,
"rendered_following": rendered_following,
"header": connections,
}
return _template_values(request,
page_title="edit connections",
navbar='nav_account',
sub_navbar="subnav_connections",
**template_dict)
@login_required
@render_to('accounts/sync_delicious.html')
def sync_delicious(request):
"""
Edit connection (following/followers)
"""
user = request.user
template_dict = {"connected": False,
"synced": "You are not connected to Eyebrowse."}
delicious_info = DeliciousInfo.objects.filter(user=user)
if len(delicious_info) > 0:
template_dict[
"synced"] = "Your Delicious account is already connected to Eyebrowse."
template_dict['connected'] = True
else:
if "code" in request.GET:
code = request.GET.get("code")
data = urllib.urlencode(
{'client_id': DELICIOUS_CONSUMER_KEY,
'client_secret': DELICIOUS_CONSUMER_SECRET,
'grant_type': "code",
'redirect_uri': "http://eyebrowse.csail.mit.edu/accounts/profile/sync_delicious",
'code': code,
})
results = json.loads(urllib2.urlopen(
'https://avosapi.delicious.com/api/v1/oauth/token',
data).read())
access_token = results["access_token"]
DeliciousInfo.objects.create(
user=user, access_token=access_token)
template_dict[
"synced"] = "Your Delicious account is now connected to Eyebrowse!"
template_dict['connected'] = True
else:
return redirect_to(request,
"https://delicious.com/auth/authorize?client_id=" +
DELICIOUS_CONSUMER_KEY +
"&redirect_uri=http://eyebrowse.csail.mit.edu/accounts/profile/sync_delicious")
return _template_values(request,
page_title="Connect Delicious",
navbar='nav_account',
sub_navbar="subnav_sync_delicious",
**template_dict)
@login_required
@render_to('accounts/edit_tag.html')
def edit_tags(request):
user = request.user
tags = Tag.objects.filter(user=user)
tag_dict = {}
for tag in tags:
if tag.name in tag_dict:
tag_dict[tag.name].append(tag)
else:
tag_dict[tag.name] = [tag]
template_dict = {"tags": tag_dict.values()}
return _template_values(request,
page_title="Edit My Tags",
navbar='nav_account',
sub_navbar="subnav_edit_tags",
**template_dict)
def get_twitter_info(request, api, twit_obj, template_dict):
template_dict['connected'] = True
template_dict['username'] = twit_obj.twitter_username
eye_friends = UserProfile.objects.get(user=request.user).follows.all().values_list('user__username', flat=True)
friends = api.friends_ids()
twitter_friends = TwitterInfo.objects.filter(twitter_id__in=friends)
for friend in twitter_friends:
friend.follows = str(friend.user.username in eye_friends)
template_dict['twitter_friends'] = twitter_friends
@login_required
@render_to('accounts/sync_twitter.html')
def sync_twitter(request):
"""
Edit connection (following/followers)
"""
user = request.user
template_dict = {"connected": False,
"synced": "You are not connected to Eyebrowse."}
auth = tweepy.OAuthHandler(
TWITTER_CONSUMER_KEY,
TWITTER_CONSUMER_SECRET,
"http://eyebrowse.csail.mit.edu/accounts/profile/sync_twitter")
twitter_info = TwitterInfo.objects.filter(user=user)
if len(twitter_info) > 0:
auth.set_access_token(
twitter_info[0].access_token,
twitter_info[0].access_token_secret)
api = tweepy.API(auth)
template_dict[
"synced"] = "Your Twitter account is already connected to Eyebrowse."
get_twitter_info(request, api, twitter_info[0], template_dict)
else:
if "request_token" in request.session:
token = request.session.pop("request_token")
auth.request_token = token
try:
verifier = request.GET.get('oauth_verifier')
auth.get_access_token(verifier)
token = auth.access_token
secret = auth.access_token_secret
api = tweepy.API(auth)
twitter_user = api.me()
username = twitter_user.screen_name
twitter_id = twitter_user.id
twit_obj = TwitterInfo.objects.create(
user=user, twitter_username=username, twitter_id=twitter_id,
access_token=token, access_token_secret=secret)
template_dict[
"synced"] = "Your Twitter account is now connected to Eyebrowse!"
get_twitter_info(request, api, twit_obj, template_dict)
except tweepy.TweepError, e:
logger.info(e)
logger.info("Error! Failed to get access token")
else:
logger.info("no request_token")
try:
redirect_rule = auth.get_authorization_url()
request.session["request_token"] = auth.request_token
return redirect_to(request, redirect_rule)
except tweepy.TweepError, e:
logger.info(e)
logger.info("Error! Failed to get request token")
return _template_values(request,
page_title="Connect Twitter",
navbar='nav_account',
sub_navbar="subnav_sync_twitter",
**template_dict)
@login_required
@ajax_request
def connect(request):
success = False
errors = {}
data = None
req_prof = request.user.profile
if request.POST and request.is_ajax():
type = request.POST.get('type', None)
username = request.POST.get('user', None)
if type and username:
user = User.objects.filter(username=username)
if user.exists():
user = user[0]
else:
user = None
if not user:
errors['user'] = "Requested user %s not found." % username
elif user.profile == req_prof:
errors['user'] = "Cannot follow yourself."
else:
if type == 'add-follow':
req_prof.follows.add(user.profile)
try:
c = Command()
c.user_populate_history(request.user, user)
except Exception, e:
print e
notice = NoticeType.objects.get(label="new_follower")
Notification.objects.create(recipient=user, sender=request.user, notice_type=notice)
send_now([user], "new_follower", sender=request.user)
elif type == 'rm-follow' and req_prof.follows.filter(
user=user).exists():
req_prof.follows.remove(user)
try:
c = Command()
c.remove_user_populate_history(request.user, user)
except Exception, e:
print e
success = True
data = {
'type': type,
'user': username,
}
else:
errors[
'user'] = 'Username required. Provided %s as username.' % username
errors['type'] = 'Type required. Provided %s as type.' % type
resp = {
'success': success,
'errors': errors,
'data': data,
}
return resp
fourth attemp
import json
import tweepy
import urllib
import urllib2
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.views.generic.simple import redirect_to
from annoying.decorators import ajax_request
from annoying.decorators import render_to
from accounts.models import TwitterInfo, UserProfile
from accounts.models import DeliciousInfo
from accounts.renderers import connection_table_renderer
from api.models import MuteList
from api.models import Tag
from api.models import WhiteListItem
from common.view_helpers import _template_values, JSONResponse
from common.helpers import put_profile_pic
from common.admin import email_templates
from eyebrowse.log import logger
from eyebrowse.settings import TWITTER_CONSUMER_KEY
from eyebrowse.settings import TWITTER_CONSUMER_SECRET
from eyebrowse.settings import DELICIOUS_CONSUMER_KEY
from eyebrowse.settings import DELICIOUS_CONSUMER_SECRET
from notifications.models import Notification, NoticeType, send_now
from common.management.commands.update_popular_history import Command
@login_required
@render_to('accounts/whitelist.html')
def whitelist(request):
"""
Edit whitelist entries
"""
whitelist = WhiteListItem.objects.filter(user=request.user)
return _template_values(request,
page_title="edit sharelist",
header="sharelist",
navbar='nav_account',
sub_navbar="subnav_whitelist",
whitelist=whitelist)
@login_required
@render_to('accounts/mutelist.html')
def mutelist(request):
"""
Edit mutelist entries
"""
mutelist = MuteList.objects.filter(user=request.user)
return _template_values(request,
page_title="edit mute list",
header="mutelist",
navbar='nav_account',
sub_navbar="subnav_mutelist",
mutelist=mutelist)
@login_required
@render_to('accounts/account.html')
def account(request):
"""
Edit account info
"""
user = request.user
if request.POST and request.is_ajax():
success = False
errors = {}
data = None
type = request.POST.get('form_type', None)
if type == 'account-info':
first_name = request.POST.get('first_name', '')
last_name = request.POST.get('last_name', '')
anon_email = request.POST.get('anon_checkbox', False) == 'True'
location = request.POST.get('location', '')
website = request.POST.get('website', '')
bio = request.POST.get('bio', '')
user.first_name = first_name
user.last_name = last_name
user.save()
profile = user.profile
profile.anon_email = anon_email
profile.location = location
profile.website = website
profile.bio = bio
profile.save()
success = "User info updated!"
elif type == 'pic':
pic_url = request.POST.get('pic_url')
# download and upload to our S3
pic_url = put_profile_pic(pic_url, user.profile)
if pic_url: # no errors/less than 1mb #patlsotw
user.profile.pic_url = pic_url
user.profile.save()
success = "Profile picture changed!"
else:
errors['pic'] = ['Oops -- something went wrong.']
resp = {
'success': success,
'errors': errors,
'type': type,
'data': data,
}
return JSONResponse(resp)
return _template_values(request,
page_title="edit sharelist",
header="account info",
navbar='nav_account',
sub_navbar="subnav_account_info")
@login_required
@render_to('accounts/connections.html')
def connections(request):
"""
Edit connection (following/followers)
"""
following = request.user.profile.follows.all()
followers = request.user.profile.followed_by.all()
rendered_following = connection_table_renderer(
following, 'following', following)
rendered_followers = connection_table_renderer(
followers, 'followers', following)
template_dict = {
"rendered_followers": rendered_followers,
"rendered_following": rendered_following,
"header": connections,
}
return _template_values(request,
page_title="edit connections",
navbar='nav_account',
sub_navbar="subnav_connections",
**template_dict)
@login_required
@render_to('accounts/sync_delicious.html')
def sync_delicious(request):
"""
Edit connection (following/followers)
"""
user = request.user
template_dict = {"connected": False,
"synced": "You are not connected to Eyebrowse."}
delicious_info = DeliciousInfo.objects.filter(user=user)
if len(delicious_info) > 0:
template_dict[
"synced"] = "Your Delicious account is already connected to Eyebrowse."
template_dict['connected'] = True
else:
if "code" in request.GET:
code = request.GET.get("code")
data = urllib.urlencode(
{'client_id': DELICIOUS_CONSUMER_KEY,
'client_secret': DELICIOUS_CONSUMER_SECRET,
'grant_type': "code",
'redirect_uri': "http://eyebrowse.csail.mit.edu/accounts/profile/sync_delicious",
'code': code,
})
results = json.loads(urllib2.urlopen(
'https://avosapi.delicious.com/api/v1/oauth/token',
data).read())
access_token = results["access_token"]
DeliciousInfo.objects.create(
user=user, access_token=access_token)
template_dict[
"synced"] = "Your Delicious account is now connected to Eyebrowse!"
template_dict['connected'] = True
else:
return redirect_to(request,
"https://delicious.com/auth/authorize?client_id=" +
DELICIOUS_CONSUMER_KEY +
"&redirect_uri=http://eyebrowse.csail.mit.edu/accounts/profile/sync_delicious")
return _template_values(request,
page_title="Connect Delicious",
navbar='nav_account',
sub_navbar="subnav_sync_delicious",
**template_dict)
@login_required
@render_to('accounts/edit_tag.html')
def edit_tags(request):
user = request.user
tags = Tag.objects.filter(user=user)
tag_dict = {}
for tag in tags:
if tag.name in tag_dict:
tag_dict[tag.name].append(tag)
else:
tag_dict[tag.name] = [tag]
template_dict = {"tags": tag_dict.values()}
return _template_values(request,
page_title="Edit My Tags",
navbar='nav_account',
sub_navbar="subnav_edit_tags",
**template_dict)
def get_twitter_info(request, api, twit_obj, template_dict):
template_dict['connected'] = True
template_dict['username'] = twit_obj.twitter_username
eye_friends = UserProfile.objects.get(user=request.user).follows.all().values_list('user__username', flat=True)
friends = api.friends_ids()
twitter_friends = TwitterInfo.objects.filter(twitter_id__in=friends)
for friend in twitter_friends:
friend.follows = str(friend.user.username in eye_friends)
template_dict['twitter_friends'] = twitter_friends
@login_required
@render_to('accounts/sync_twitter.html')
def sync_twitter(request):
"""
Edit connection (following/followers)
"""
user = request.user
template_dict = {"connected": False,
"synced": "You are not connected to Eyebrowse."}
auth = tweepy.OAuthHandler(
TWITTER_CONSUMER_KEY,
TWITTER_CONSUMER_SECRET,
"http://eyebrowse.csail.mit.edu/accounts/profile/sync_twitter")
twitter_info = TwitterInfo.objects.filter(user=user)
if len(twitter_info) > 0:
auth.set_access_token(
twitter_info[0].access_token,
twitter_info[0].access_token_secret)
api = tweepy.API(auth)
template_dict[
"synced"] = "Your Twitter account is already connected to Eyebrowse."
get_twitter_info(request, api, twitter_info[0], template_dict)
else:
if "request_token" in request.session:
token = request.session.pop("request_token")
auth.request_token = token
try:
verifier = request.GET.get('oauth_verifier')
auth.get_access_token(verifier)
token = auth.access_token
secret = auth.access_token_secret
api = tweepy.API(auth)
twitter_user = api.me()
username = twitter_user.screen_name
twitter_id = twitter_user.id
twit_obj = TwitterInfo.objects.create(
user=user, twitter_username=username, twitter_id=twitter_id,
access_token=token, access_token_secret=secret)
template_dict[
"synced"] = "Your Twitter account is now connected to Eyebrowse!"
get_twitter_info(request, api, twit_obj, template_dict)
except tweepy.TweepError, e:
logger.info(e)
logger.info("Error! Failed to get access token")
else:
logger.info("no request_token")
try:
redirect_rule = auth.get_authorization_url()
request.session["request_token"] = auth.request_token
return redirect_to(request, redirect_rule)
except tweepy.TweepError, e:
logger.info(e)
logger.info("Error! Failed to get request token")
return _template_values(request,
page_title="Connect Twitter",
navbar='nav_account',
sub_navbar="subnav_sync_twitter",
**template_dict)
@login_required
@ajax_request
def connect(request):
success = False
errors = {}
data = None
req_prof = request.user.profile
if request.POST and request.is_ajax():
type = request.POST.get('type', None)
username = request.POST.get('user', None)
if type and username:
user = User.objects.filter(username=username)
if user.exists():
user = user[0]
else:
user = None
if not user:
errors['user'] = "Requested user %s not found." % username
elif user.profile == req_prof:
errors['user'] = "Cannot follow yourself."
else:
if type == 'add-follow':
req_prof.follows.add(user.profile)
try:
c = Command()
c.user_populate_history(request.user, user)
except Exception, e:
print e
notice = NoticeType.objects.get(label="new_follower")
Notification.objects.create(recipient=user, sender=request.user, notice_type=notice)
send_now([user], "new_follower", sender=request.user)
elif type == 'rm-follow' and req_prof.follows.filter(
user=user).exists():
req_prof.follows.remove(user)
try:
c = Command()
c.remove_user_populate_history(request.user, user)
except Exception, e:
print e
success = True
data = {
'type': type,
'user': username,
}
else:
errors[
'user'] = 'Username required. Provided %s as username.' % username
errors['type'] = 'Type required. Provided %s as type.' % type
resp = {
'success': success,
'errors': errors,
'data': data,
}
return resp
|
# -*- coding: utf-8 -*-
"""Examples of non-linear functions for non-parametric regression
Created on Sat Jan 05 20:21:22 2013
Author: Josef Perktold
"""
import numpy as np
def fg1(x):
'''Fan and Gijbels example function 1
'''
return x + 2 * np.exp(-16 * x**2)
def fg1eu(x):
'''Eubank similar to Fan and Gijbels example function 1
'''
return x + 0.5 * np.exp(-50 * (x - 0.5)**2)
def fg2(x):
'''Fan and Gijbels example function 2
'''
return np.sin(2 * x) + 2 * np.exp(-16 * x**2)
class _UnivariateFanGijbels(object):
'''Fan and Gijbels example function 1
'''
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
if x is None:
if distr_x is None:
x = np.random.normal(loc=0, scale=self.s_x, size=nobs)
else:
x = distr_x.rvs(size=nobs)
self.x = x
self.x.sort()
if distr_noise is None:
noise = np.random.normal(loc=0, scale=self.s_noise, size=nobs)
else:
noise = distr_noise.rvs(size=nobs)
#self.func = fg1
self.y_true = y_true = self.func(x)
self.y = y_true + noise
def plot(self, scatter=True, ax=None):
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
if scatter:
ax.plot(self.x, self.y, 'o', alpha=0.5)
xx = np.linspace(self.x.min(), self.x.max(), 100)
ax.plot(xx, self.func(xx), lw=2, color='b', label='dgp mean')
return ax.figure
class UnivariateFanGijbels1(_UnivariateFanGijbels):
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
self.s_x = 1.
self.s_noise = 0.7
self.func = fg1
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
class UnivariateFanGijbels2(_UnivariateFanGijbels):
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
self.s_x = 1.
self.s_noise = 0.5
self.func = fg2
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
class UnivariateFanGijbels1EU(_UnivariateFanGijbels):
'''
Eubank p.179f
'''
def __init__(self, nobs=50, x=None, distr_x=None, distr_noise=None):
from scipy import stats
distr_x = stats.uniform
self.s_noise = 0.15
self.func = fg1eu
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
DOC: docstrings dgp_examples
# -*- coding: utf-8 -*-
"""Examples of non-linear functions for non-parametric regression
Created on Sat Jan 05 20:21:22 2013
Author: Josef Perktold
"""
import numpy as np
def fg1(x):
'''Fan and Gijbels example function 1
'''
return x + 2 * np.exp(-16 * x**2)
def fg1eu(x):
'''Eubank similar to Fan and Gijbels example function 1
'''
return x + 0.5 * np.exp(-50 * (x - 0.5)**2)
def fg2(x):
'''Fan and Gijbels example function 2
'''
return np.sin(2 * x) + 2 * np.exp(-16 * x**2)
doc = {'description':
'''Base Class for Univariate non-linear example
Does not work on it's own.
needs additional at least self.func
''',
'ref': ''}
class _UnivariateFanGijbels(object):
__doc__ = '''%(description)s
Parameters
----------
nobs : int
number of observations to simulate
x : None or 1d array
If x is given then it is used for the exogenous variable instead of
creating a random sample
distr_x : None or distribution instance
Only used if x is None. The rvs method is used to create a random
sample of the exogenous (explanatory) variable.
distr_noise : None or distribution instance
The rvs method is used to create a random sample of the errors.
Attributes
----------
x : ndarray, 1-D
exogenous or explanatory variable. x is sorted.
y : ndarray, 1-D
endogenous or response variable
y_true : ndarray, 1-D
expected values of endogenous or response variable, i.e. values of y
without noise
func : callable
underlying function (defined by subclass)
%(ref)s
''' #% doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
if x is None:
if distr_x is None:
x = np.random.normal(loc=0, scale=self.s_x, size=nobs)
else:
x = distr_x.rvs(size=nobs)
self.x = x
self.x.sort()
if distr_noise is None:
noise = np.random.normal(loc=0, scale=self.s_noise, size=nobs)
else:
noise = distr_noise.rvs(size=nobs)
#self.func = fg1
self.y_true = y_true = self.func(x)
self.y = y_true + noise
def plot(self, scatter=True, ax=None):
'''plot the mean function and optionally the scatter of the sample
Parameters
----------
scatter: bool
add scatterpoints of sample to plot
ax : None or matplotlib axis instance
If None, then a matplotlib.pyplot figure is created, otherwise
the given axis, ax, is used.
Returns
-------
fig : matplotlib figure
This is either the created figure instance or the one associated
with ax if ax is given.
'''
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
if scatter:
ax.plot(self.x, self.y, 'o', alpha=0.5)
xx = np.linspace(self.x.min(), self.x.max(), 100)
ax.plot(xx, self.func(xx), lw=2, color='b', label='dgp mean')
return ax.figure
doc = {'description':
'''Fan and Gijbels example function 1
linear trend plus a hump
''',
'ref':
'''
References
----------
Fan, Jianqing, and Irene Gijbels. 1992. "Variable Bandwidth and Local
Linear Regression Smoothers."
The Annals of Statistics 20 (4) (December): 2008-2036. doi:10.2307/2242378.
'''}
class UnivariateFanGijbels1(_UnivariateFanGijbels):
__doc__ = _UnivariateFanGijbels.__doc__ % doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
self.s_x = 1.
self.s_noise = 0.7
self.func = fg1
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
doc['description'] =\
'''Fan and Gijbels example function 2
sin plus a hump
'''
class UnivariateFanGijbels2(_UnivariateFanGijbels):
__doc__ = _UnivariateFanGijbels.__doc__ % doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
self.s_x = 1.
self.s_noise = 0.5
self.func = fg2
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
class UnivariateFanGijbels1EU(_UnivariateFanGijbels):
'''
Eubank p.179f
'''
def __init__(self, nobs=50, x=None, distr_x=None, distr_noise=None):
from scipy import stats
distr_x = stats.uniform
self.s_noise = 0.15
self.func = fg1eu
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
|
import json
import logging
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.db import IntegrityError
from django_openid_auth.views import parse_openid_response, login_complete
from django_openid_auth.auth import OpenIDBackend
from django_openid_auth.exceptions import IdentityAlreadyClaimed
from .models import AuthToken, User, EmailConfirmationToken
from . import forms
from . import tasks
import games.models
import games.util.steam
LOGGER = logging.getLogger(__name__)
def register(request):
form = forms.RegistrationForm(request.POST or None)
if request.method == "POST" and form.is_valid():
form.save()
return HttpResponseRedirect('/')
return render(request, 'registration/registration_form.html',
{'form': form})
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip_address = x_forwarded_for.split(',')[0]
else:
ip_address = request.META.get('REMOTE_ADDR')
return ip_address
@csrf_exempt
def client_auth(request):
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user and user.is_active:
response_data = {'token': user.api_key.key}
else:
response_data = {'error': "Bad credentials"}
return HttpResponse(json.dumps(response_data),
content_type="application/json")
@csrf_exempt
def client_verify(request):
token = request.POST.get('token')
try:
auth_token = AuthToken.objects.get(token=token,
ip_address=get_client_ip(request))
response_data = {'username': auth_token.user.username}
except AuthToken.DoesNotExist:
response_data = {'error': 'invalid token'}
return HttpResponse(json.dumps(response_data),
content_type="application/json")
@login_required
def profile(request):
user = request.user
return HttpResponseRedirect(reverse('user_account',
args=(user.username, )))
def user_account(request, username):
user = get_object_or_404(User, username=username)
if request.user.username == username:
submissions = games.models.GameSubmission.objects.filter(
user=user, accepted_at__isnull=True
)
return render(request, 'accounts/profile.html',
{'user': user, 'submissions': submissions})
else:
# TODO We're returning a 404 error until we have a good public profile
# page (with worthwhile content)
return Http404
# return render(request, 'accounts/public_profile.html', {'user': user})
@login_required
def user_send_confirmation(request):
user = request.user
if not user.email_confirmed:
token = EmailConfirmationToken(email=user.email)
token.create_token()
token.save()
token.send(request)
return render(request, 'accounts/confirmation_send.html', {'user': user})
def user_email_confirm(request):
token = request.GET.get('token')
confirmation_token = get_object_or_404(EmailConfirmationToken, token=token)
if confirmation_token.is_valid():
confirmation_token.confirm_user()
confirmation_token.delete()
return render(request, 'accounts/confirmation_received.html',
{'confirmation_token': confirmation_token})
@login_required
def profile_edit(request, username):
user = get_object_or_404(User, username=username)
if user != request.user:
raise Http404
form = forms.ProfileForm(request.POST or None, request.FILES or None,
instance=user)
if form.is_valid():
form.save()
messages.success(
request,
'Your account info has been updated.'
)
return redirect(reverse('user_account', args=(username, )))
return render(request, 'accounts/profile_edit.html', {'form': form})
@csrf_exempt
def associate_steam(request):
if not request.user.is_authenticated():
return login_complete(request)
else:
openid_response = parse_openid_response(request)
account_url = reverse('user_account', args=(request.user.username, ))
if openid_response.status == 'failure':
messages.warning(request, "Failed to associate Steam account")
return redirect(account_url)
openid_backend = OpenIDBackend()
try:
openid_backend.associate_openid(request.user, openid_response)
except IdentityAlreadyClaimed:
messages.warning(
request,
"This Steam account is already claimed by another Lutris "
"account.\nPlease contact an administrator if you want "
"to reattribute your Steam account to this current account."
)
return redirect(account_url)
request.user.set_steamid()
request.user.save()
return redirect(reverse("library_steam_sync"))
def library_show(request, username):
user = get_object_or_404(User, username=username)
library = games.models.GameLibrary.objects.get(user=user)
library_games = library.games.all()
return render(request, 'accounts/library_show.html',
{'user': user, 'games': library_games,
'is_library': True})
@login_required
def library_add(request, slug):
user = request.user
library = games.models.GameLibrary.objects.get(user=user)
game = get_object_or_404(games.models.Game, slug=slug)
try:
library.games.add(game)
except IntegrityError:
LOGGER.debug('Game already in library')
return redirect(game.get_absolute_url())
@login_required
def library_remove(request, slug):
user = request.user
library = games.models.GameLibrary.objects.get(user=user)
game = get_object_or_404(games.models.Game, slug=slug)
library.games.remove(game)
redirect_url = request.META.get('HTTP_REFERER')
if not redirect_url:
username = user.username
redirect_url = reverse('library_show', kwargs={'username': username})
return redirect(redirect_url)
@login_required
def library_steam_sync(request):
user = request.user
tasks.sync_steam_library.delay(user.id)
messages.success(
request,
'Your Steam library is being synced with your Lutris account'
)
return redirect(reverse("library_show",
kwargs={'username': user.username}))
Make library private by default
import json
import logging
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.db import IntegrityError
from django_openid_auth.views import parse_openid_response, login_complete
from django_openid_auth.auth import OpenIDBackend
from django_openid_auth.exceptions import IdentityAlreadyClaimed
from .models import AuthToken, User, EmailConfirmationToken
from . import forms
from . import tasks
import games.models
import games.util.steam
LOGGER = logging.getLogger(__name__)
def register(request):
form = forms.RegistrationForm(request.POST or None)
if request.method == "POST" and form.is_valid():
form.save()
return HttpResponseRedirect('/')
return render(request, 'registration/registration_form.html',
{'form': form})
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip_address = x_forwarded_for.split(',')[0]
else:
ip_address = request.META.get('REMOTE_ADDR')
return ip_address
@csrf_exempt
def client_auth(request):
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user and user.is_active:
response_data = {'token': user.api_key.key}
else:
response_data = {'error': "Bad credentials"}
return HttpResponse(json.dumps(response_data),
content_type="application/json")
@csrf_exempt
def client_verify(request):
token = request.POST.get('token')
try:
auth_token = AuthToken.objects.get(token=token,
ip_address=get_client_ip(request))
response_data = {'username': auth_token.user.username}
except AuthToken.DoesNotExist:
response_data = {'error': 'invalid token'}
return HttpResponse(json.dumps(response_data),
content_type="application/json")
@login_required
def profile(request):
user = request.user
return HttpResponseRedirect(reverse('user_account',
args=(user.username, )))
def user_account(request, username):
user = get_object_or_404(User, username=username)
if request.user.username == username:
submissions = games.models.GameSubmission.objects.filter(
user=user, accepted_at__isnull=True
)
return render(request, 'accounts/profile.html',
{'user': user, 'submissions': submissions})
else:
# TODO We're returning a 404 error until we have a good public profile
# page (with worthwhile content)
return Http404
# return render(request, 'accounts/public_profile.html', {'user': user})
@login_required
def user_send_confirmation(request):
user = request.user
if not user.email_confirmed:
token = EmailConfirmationToken(email=user.email)
token.create_token()
token.save()
token.send(request)
return render(request, 'accounts/confirmation_send.html', {'user': user})
def user_email_confirm(request):
token = request.GET.get('token')
confirmation_token = get_object_or_404(EmailConfirmationToken, token=token)
if confirmation_token.is_valid():
confirmation_token.confirm_user()
confirmation_token.delete()
return render(request, 'accounts/confirmation_received.html',
{'confirmation_token': confirmation_token})
@login_required
def profile_edit(request, username):
user = get_object_or_404(User, username=username)
if user != request.user:
raise Http404
form = forms.ProfileForm(request.POST or None, request.FILES or None,
instance=user)
if form.is_valid():
form.save()
messages.success(
request,
'Your account info has been updated.'
)
return redirect(reverse('user_account', args=(username, )))
return render(request, 'accounts/profile_edit.html', {'form': form})
@csrf_exempt
def associate_steam(request):
if not request.user.is_authenticated():
return login_complete(request)
else:
openid_response = parse_openid_response(request)
account_url = reverse('user_account', args=(request.user.username, ))
if openid_response.status == 'failure':
messages.warning(request, "Failed to associate Steam account")
return redirect(account_url)
openid_backend = OpenIDBackend()
try:
openid_backend.associate_openid(request.user, openid_response)
except IdentityAlreadyClaimed:
messages.warning(
request,
"This Steam account is already claimed by another Lutris "
"account.\nPlease contact an administrator if you want "
"to reattribute your Steam account to this current account."
)
return redirect(account_url)
request.user.set_steamid()
request.user.save()
return redirect(reverse("library_steam_sync"))
def library_show(request, username):
user = get_object_or_404(User, username=username)
if request.user != user:
# TODO: Implement a profile setting to set the library public
raise Http404
library = games.models.GameLibrary.objects.get(user=user)
library_games = library.games.all()
return render(request, 'accounts/library_show.html',
{'user': user, 'games': library_games,
'is_library': True})
@login_required
def library_add(request, slug):
user = request.user
library = games.models.GameLibrary.objects.get(user=user)
game = get_object_or_404(games.models.Game, slug=slug)
try:
library.games.add(game)
except IntegrityError:
LOGGER.debug('Game already in library')
return redirect(game.get_absolute_url())
@login_required
def library_remove(request, slug):
user = request.user
library = games.models.GameLibrary.objects.get(user=user)
game = get_object_or_404(games.models.Game, slug=slug)
library.games.remove(game)
redirect_url = request.META.get('HTTP_REFERER')
if not redirect_url:
username = user.username
redirect_url = reverse('library_show', kwargs={'username': username})
return redirect(redirect_url)
@login_required
def library_steam_sync(request):
user = request.user
tasks.sync_steam_library.delay(user.id)
messages.success(
request,
'Your Steam library is being synced with your Lutris account'
)
return redirect(reverse("library_show",
kwargs={'username': user.username}))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<https://micronaet.com>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import xlsxwriter
from openerp.osv import fields, osv, expression
from datetime import datetime, timedelta
from openerp.tools.translate import _
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
import pdb
_logger = logging.getLogger(__name__)
class ResCompany(osv.osv):
""" Model name: Parameters
"""
_inherit = 'res.company'
_columns = {
}
def get_type(self, code, uom):
""" Extract type from code
"""
code = (code or '').strip().upper()
uom = (uom or '').upper()
if not code:
return _('Not assigned')
start = code[0]
end = code[-1]
if uom == 'PCE': # Machinery and Component
return 'COMP'
if start in 'PR': # Waste
return 'REC'
if start in 'AB': # Raw materials
return 'MP'
if end == 'X': # Production (MX)
return 'PT'
return 'IT' # Re-sold (IT)
# Override for MX report (was different)
def extract_product_level_xlsx(self, cr, uid, ids, context=None):
""" Extract current report stock level MX Version
"""
if context is None:
context = {}
save_mode = context.get('save_mode')
# Pool used:
excel_pool = self.pool.get('excel.writer')
product_pool = self.pool.get('product.product')
# ---------------------------------------------------------------------
# Excel export:
# ---------------------------------------------------------------------
# Setup:
header = [
u'Tipo',
u'Codigo', u'Descripcion', u'UM',
u'Appr.', u'Mod.',
u'Invent. actual', u'Status',
u'Manual',
u'Tiempo de Entrega',
u'Promedio Kg/Mes',
u'Nivel Minimo Dias', u'Nivel Minimo Kg.',
u'Nivel Maximo Dia', u'Nivel Maximo Kg.',
# u'Obsolete',
]
width = [
10,
15, 30, 5,
6, 9,
10, 10,
5, 8, 8,
8, 8,
8, 8,
# 6,
]
# ---------------------------------------------------------------------
# Create WS:
# ---------------------------------------------------------------------
ws_not_present = 'Sin Movimentos'
ws_list = (
('ROP', [
# ('manual_stock_level', '=', False),
('medium_stock_qty', '>', 0),
]),
# ('Niveles Manuales', [
# ('manual_stock_level', '=', True),
# # ('min_stock_level', '>', 0),
# ]),
(ws_not_present, [
('min_stock_level', '<=', 0),
]),
)
# Create all pages:
excel_format = {}
removed_ids = []
for ws_name, product_filter in ws_list:
excel_pool.create_worksheet(name=ws_name)
excel_pool.column_width(ws_name, width)
# excel_pool.row_height(ws_name, row_list, height=10)
excel_pool.freeze_panes(ws_name, 1, 2)
excel_pool.column_hidden(ws_name, [4, 5, 9])
# -----------------------------------------------------------------
# Generate format used (first time only):
# -----------------------------------------------------------------
if not excel_format:
excel_pool.set_format(header_size=10, text_size=10)
excel_format['title'] = excel_pool.get_format(key='title')
excel_format['header'] = excel_pool.get_format(key='header')
excel_format['header_wrap'] = excel_pool.get_format(
key='header_wrap')
excel_format['text'] = excel_pool.get_format(key='text')
excel_format['right'] = excel_pool.get_format(key='text_right')
excel_format['number'] = excel_pool.get_format(key='number')
excel_format['white'] = {
'text': excel_pool.get_format(key='text'),
'right': excel_pool.get_format(key='text_right'),
'number': excel_pool.get_format(key='number'),
}
excel_format['orange'] = {
'text': excel_pool.get_format(key='bg_orange'),
'right': excel_pool.get_format(key='bg_orange_right'),
'number': excel_pool.get_format(key='bg_orange_number'),
}
excel_format['red'] = {
'text': excel_pool.get_format(key='bg_red'),
'right': excel_pool.get_format(key='bg_red_right'),
'number': excel_pool.get_format(key='bg_red_number'),
}
# -----------------------------------------------------------------
# Write title / header
# -----------------------------------------------------------------
row = 0
excel_pool.write_xls_line(
ws_name, row, header,
default_format=excel_format['header_wrap'])
excel_pool.autofilter(ws_name, row, row, 0, len(header) - 1)
excel_pool.row_height(ws_name, [row], height=38)
# -----------------------------------------------------------------
# Product selection:
# -----------------------------------------------------------------
product_ids = product_pool.search(
cr, uid, product_filter, context=context)
if ws_name == ws_not_present and removed_ids:
# Add also removed from other loop
product_ids = list(set(product_ids).union(set(removed_ids)))
products = product_pool.browse(
cr, uid, product_ids,
context=context)
# TODO add also package data!!!
row += 1
for product in sorted(products, key=lambda x: (
self.get_type(x.default_code, x.uom_id.name),
x.default_code)):
# Filter code:
default_code = product.default_code
if not default_code:
_logger.error('Product %s has no code' % product.name)
continue
product_type = self.get_type(
product.default_code, product.uom_id.name)
# Remove REC and SER product (go in last page):
if ws_name != ws_not_present and product_type == 'REC' or \
default_code.startswith('SER'):
removed_ids.append(product.id)
continue
account_qty = int(product.accounting_qty)
min_stock_level = int(product.min_stock_level)
if account_qty < min_stock_level:
state = _(u'Bajo Nivel')
color_format = excel_format['orange']
elif account_qty < 0:
state = _(u'Negativo')
color_format = excel_format['red']
else:
state = _('OK')
color_format = excel_format['white']
line = [
product_type,
default_code or '',
product.name or '',
product.uom_id.name or '',
(product.approx_integer, color_format['right']),
product.approx_mode or '',
(account_qty, color_format['right']),
state,
(product.manual_stock_level or '', color_format['right']),
product.day_leadtime or '',
# per month:
(product.medium_stock_qty * 30, color_format['number']),
(product.day_min_level, color_format['right']),
(int(min_stock_level), color_format['right']),
(product.day_max_level, color_format['right']),
(int(product.max_stock_level), color_format['right']),
# 'X' if product.stock_obsolete else '',
]
excel_pool.write_xls_line(
ws_name, row, line, default_format=color_format['text'])
row += 1
if save_mode:
return excel_pool.save_file_as(save_mode)
else:
return excel_pool.return_attachment(
cr, uid, 'Livelli prodotto MX', 'stock_level_MX.xlsx',
version='7.0', php=True, context=context)
class MrpProductionWorkcenterLineOverride(osv.osv):
""" Model name: Override for add product in calc of medium
"""
_inherit = 'mrp.production.workcenter.line'
# Override to medium also product and packages:
def update_product_level_from_production(self, cr, uid, ids, context=None):
""" Update product level from production (this time also product)
MX Mode:
"""
_logger.info('Updating medium from MRP (final product)')
company_pool = self.pool.get('res.company')
load_pool = self.pool.get('mrp.production.workcenter.load')
# Get parameters:
company_ids = company_pool.search(cr, uid, [], context=context)
company = company_pool.browse(
cr, uid, company_ids, context=context)[0]
stock_level_days = company.stock_level_days
if not stock_level_days:
raise osv.except_osv(
_('Error stock management'),
_('Setup the parameter in company form'),
)
# MRP stock level extra parameters:
mrp_stock_level_mp = company.mrp_stock_level_mp or stock_level_days
mrp_stock_level_pf = company.mrp_stock_level_pf or stock_level_days
# mrp_stock_level_force (for product)
now = datetime.now()
date_limit = {
# statistic period from keep MRP production:
'now': self.get_form_date(now, 0),
'mrp': self.get_form_date(now, stock_level_days),
'material': self.get_form_date(now, mrp_stock_level_mp),
'product': self.get_form_date(now, mrp_stock_level_pf),
}
# Update with particular product
self.get_product_stock_days_force(cr, uid, date_limit, context=context)
load_ids = load_pool.search(cr, uid, [
('date', '>=', date_limit['mrp']),
('date', '<', date_limit['now']),
('recycle', '=', False),
], context=context)
_logger.warning('Load found: %s Period: [>=%s <%s]' % (
len(load_ids),
date_limit['mrp'],
date_limit['now'],
))
product_obsolete = {}
product_medium = {}
log_f = open(os.path.expanduser('~/load.log'), 'w')
for load in load_pool.browse(
cr, uid, load_ids, context=context):
date = load.date
# -----------------------------------------------------------------
# Product:
# -----------------------------------------------------------------
product = load.product_id # production_id.product_id
if product not in product_obsolete:
product_obsolete[product] = True # Default obsolete
# Check product obsolete (partic or default):
if date > date_limit.get(product, date_limit['product']):
product_obsolete[product] = False
quantity = load.product_qty
if product in product_medium:
product_medium[product] += quantity
else:
product_medium[product] = quantity
# -----------------------------------------------------------------
# Recycle:
# -----------------------------------------------------------------
# recycle_product_id # TODO not used
# -----------------------------------------------------------------
# Package:
# -----------------------------------------------------------------
product = load.package_id.linked_product_id
if product not in product_obsolete:
product_obsolete[product] = True # Default obsolete
# Check product obsolete (partic. or default):
if date > date_limit.get(product, date_limit['product']):
product_obsolete[product] = False
quantity = load.ul_qty
if product and quantity:
if product in product_medium:
product_medium[product] += quantity
else:
product_medium[product] = quantity
# -----------------------------------------------------------------
# Package:
# -----------------------------------------------------------------
product = load.pallet_product_id
if product not in product_obsolete:
product_obsolete[product] = True # Default obsolete
# Check product obsolete (partic. or default):
if date > date_limit.get(product, date_limit['product']):
product_obsolete[product] = False
quantity = load.pallet_qty
if product and quantity:
if product in product_medium:
product_medium[product] += quantity
else:
product_medium[product] = quantity
log_f.write('%s|%s|%s|%s|%s|%s\n' % (
date,
load.production_id.name,
load.product_id.id,
load.product_id.default_code or '',
load.product_qty,
date > date_limit.get(product, date_limit['product']),
))
# Update medium in product:
self.update_product_medium_from_dict(
cr, uid, product_medium, stock_level_days,
product_obsolete, # manage obsolete in this function,
context=context)
# Call original method for raw materials:
return super(MrpProductionWorkcenterLineOverride, self).\
update_product_level_from_production(cr, uid, ids, context=context)
debug operation
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<https://micronaet.com>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import xlsxwriter
from openerp.osv import fields, osv, expression
from datetime import datetime, timedelta
from openerp.tools.translate import _
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
import pdb
_logger = logging.getLogger(__name__)
class ResCompany(osv.osv):
""" Model name: Parameters
"""
_inherit = 'res.company'
_columns = {
}
def get_type(self, code, uom):
""" Extract type from code
"""
code = (code or '').strip().upper()
uom = (uom or '').upper()
if not code:
return _('Not assigned')
start = code[0]
end = code[-1]
if uom == 'PCE': # Machinery and Component
return 'COMP'
if start in 'PR': # Waste
return 'REC'
if start in 'AB': # Raw materials
return 'MP'
if end == 'X': # Production (MX)
return 'PT'
return 'IT' # Re-sold (IT)
# Override for MX report (was different)
def extract_product_level_xlsx(self, cr, uid, ids, context=None):
""" Extract current report stock level MX Version
"""
if context is None:
context = {}
save_mode = context.get('save_mode')
# Pool used:
excel_pool = self.pool.get('excel.writer')
product_pool = self.pool.get('product.product')
# ---------------------------------------------------------------------
# Excel export:
# ---------------------------------------------------------------------
# Setup:
header = [
u'Tipo',
u'Codigo', u'Descripcion', u'UM',
u'Appr.', u'Mod.',
u'Invent. actual', u'Status',
u'Manual',
u'Tiempo de Entrega',
u'Promedio Kg/Mes',
u'Nivel Minimo Dias', u'Nivel Minimo Kg.',
u'Nivel Maximo Dia', u'Nivel Maximo Kg.',
# u'Obsolete',
]
width = [
10,
15, 30, 5,
6, 9,
10, 10,
5, 8, 8,
8, 8,
8, 8,
# 6,
]
# ---------------------------------------------------------------------
# Create WS:
# ---------------------------------------------------------------------
ws_not_present = 'Sin Movimentos'
ws_list = (
('ROP', [
# ('manual_stock_level', '=', False),
('medium_stock_qty', '>', 0),
]),
# ('Niveles Manuales', [
# ('manual_stock_level', '=', True),
# # ('min_stock_level', '>', 0),
# ]),
(ws_not_present, [
('min_stock_level', '<=', 0),
]),
)
# Create all pages:
excel_format = {}
removed_ids = []
for ws_name, product_filter in ws_list:
excel_pool.create_worksheet(name=ws_name)
excel_pool.column_width(ws_name, width)
# excel_pool.row_height(ws_name, row_list, height=10)
excel_pool.freeze_panes(ws_name, 1, 2)
excel_pool.column_hidden(ws_name, [4, 5, 9])
# -----------------------------------------------------------------
# Generate format used (first time only):
# -----------------------------------------------------------------
if not excel_format:
excel_pool.set_format(header_size=10, text_size=10)
excel_format['title'] = excel_pool.get_format(key='title')
excel_format['header'] = excel_pool.get_format(key='header')
excel_format['header_wrap'] = excel_pool.get_format(
key='header_wrap')
excel_format['text'] = excel_pool.get_format(key='text')
excel_format['right'] = excel_pool.get_format(key='text_right')
excel_format['number'] = excel_pool.get_format(key='number')
excel_format['white'] = {
'text': excel_pool.get_format(key='text'),
'right': excel_pool.get_format(key='text_right'),
'number': excel_pool.get_format(key='number'),
}
excel_format['orange'] = {
'text': excel_pool.get_format(key='bg_orange'),
'right': excel_pool.get_format(key='bg_orange_right'),
'number': excel_pool.get_format(key='bg_orange_number'),
}
excel_format['red'] = {
'text': excel_pool.get_format(key='bg_red'),
'right': excel_pool.get_format(key='bg_red_right'),
'number': excel_pool.get_format(key='bg_red_number'),
}
# -----------------------------------------------------------------
# Write title / header
# -----------------------------------------------------------------
row = 0
excel_pool.write_xls_line(
ws_name, row, header,
default_format=excel_format['header_wrap'])
excel_pool.autofilter(ws_name, row, row, 0, len(header) - 1)
excel_pool.row_height(ws_name, [row], height=38)
# -----------------------------------------------------------------
# Product selection:
# -----------------------------------------------------------------
product_ids = product_pool.search(
cr, uid, product_filter, context=context)
if ws_name == ws_not_present and removed_ids:
# Add also removed from other loop
product_ids = list(set(product_ids).union(set(removed_ids)))
products = product_pool.browse(
cr, uid, product_ids,
context=context)
# TODO add also package data!!!
row += 1
for product in sorted(products, key=lambda x: (
self.get_type(x.default_code, x.uom_id.name),
x.default_code)):
# Filter code:
default_code = product.default_code
if default_code == 'S0045VV--X':
pdb.set_trace()
if not default_code:
_logger.error('Product %s has no code' % product.name)
continue
product_type = self.get_type(
product.default_code, product.uom_id.name)
# Remove REC and SER product (go in last page):
if ws_name != ws_not_present and product_type == 'REC' or \
default_code.startswith('SER'):
removed_ids.append(product.id)
continue
account_qty = int(product.accounting_qty)
min_stock_level = int(product.min_stock_level)
if account_qty < min_stock_level:
state = _(u'Bajo Nivel')
color_format = excel_format['orange']
elif account_qty < 0:
state = _(u'Negativo')
color_format = excel_format['red']
else:
state = _('OK')
color_format = excel_format['white']
line = [
product_type,
default_code or '',
product.name or '',
product.uom_id.name or '',
(product.approx_integer, color_format['right']),
product.approx_mode or '',
(account_qty, color_format['right']),
state,
(product.manual_stock_level or '', color_format['right']),
product.day_leadtime or '',
# per month:
(product.medium_stock_qty * 30, color_format['number']),
(product.day_min_level, color_format['right']),
(int(min_stock_level), color_format['right']),
(product.day_max_level, color_format['right']),
(int(product.max_stock_level), color_format['right']),
# 'X' if product.stock_obsolete else '',
]
excel_pool.write_xls_line(
ws_name, row, line, default_format=color_format['text'])
row += 1
if save_mode:
return excel_pool.save_file_as(save_mode)
else:
return excel_pool.return_attachment(
cr, uid, 'Livelli prodotto MX', 'stock_level_MX.xlsx',
version='7.0', php=True, context=context)
class MrpProductionWorkcenterLineOverride(osv.osv):
""" Model name: Override for add product in calc of medium
"""
_inherit = 'mrp.production.workcenter.line'
# Override to medium also product and packages:
def update_product_level_from_production(self, cr, uid, ids, context=None):
""" Update product level from production (this time also product)
MX Mode:
"""
_logger.info('Updating medium from MRP (final product)')
company_pool = self.pool.get('res.company')
load_pool = self.pool.get('mrp.production.workcenter.load')
# Get parameters:
company_ids = company_pool.search(cr, uid, [], context=context)
company = company_pool.browse(
cr, uid, company_ids, context=context)[0]
stock_level_days = company.stock_level_days
if not stock_level_days:
raise osv.except_osv(
_('Error stock management'),
_('Setup the parameter in company form'),
)
# MRP stock level extra parameters:
mrp_stock_level_mp = company.mrp_stock_level_mp or stock_level_days
mrp_stock_level_pf = company.mrp_stock_level_pf or stock_level_days
# mrp_stock_level_force (for product)
now = datetime.now()
date_limit = {
# statistic period from keep MRP production:
'now': self.get_form_date(now, 0),
'mrp': self.get_form_date(now, stock_level_days),
'material': self.get_form_date(now, mrp_stock_level_mp),
'product': self.get_form_date(now, mrp_stock_level_pf),
}
# Update with particular product
self.get_product_stock_days_force(cr, uid, date_limit, context=context)
load_ids = load_pool.search(cr, uid, [
('date', '>=', date_limit['mrp']),
('date', '<', date_limit['now']),
('recycle', '=', False),
], context=context)
_logger.warning('Load found: %s Period: [>=%s <%s]' % (
len(load_ids),
date_limit['mrp'],
date_limit['now'],
))
product_obsolete = {}
product_medium = {}
log_f = open(os.path.expanduser('~/load.log'), 'w')
for load in load_pool.browse(
cr, uid, load_ids, context=context):
date = load.date
# -----------------------------------------------------------------
# Product:
# -----------------------------------------------------------------
product = load.product_id # production_id.product_id
if product not in product_obsolete:
product_obsolete[product] = True # Default obsolete
# Check product obsolete (partic or default):
if date > date_limit.get(product, date_limit['product']):
product_obsolete[product] = False
quantity = load.product_qty
if product in product_medium:
product_medium[product] += quantity
else:
product_medium[product] = quantity
# -----------------------------------------------------------------
# Recycle:
# -----------------------------------------------------------------
# recycle_product_id # TODO not used
# -----------------------------------------------------------------
# Package:
# -----------------------------------------------------------------
product = load.package_id.linked_product_id
if product not in product_obsolete:
product_obsolete[product] = True # Default obsolete
# Check product obsolete (partic. or default):
if date > date_limit.get(product, date_limit['product']):
product_obsolete[product] = False
quantity = load.ul_qty
if product and quantity:
if product in product_medium:
product_medium[product] += quantity
else:
product_medium[product] = quantity
# -----------------------------------------------------------------
# Package:
# -----------------------------------------------------------------
product = load.pallet_product_id
if product not in product_obsolete:
product_obsolete[product] = True # Default obsolete
# Check product obsolete (partic. or default):
if date > date_limit.get(product, date_limit['product']):
product_obsolete[product] = False
quantity = load.pallet_qty
if product and quantity:
if product in product_medium:
product_medium[product] += quantity
else:
product_medium[product] = quantity
log_f.write('%s|%s|%s|%s|%s|%s\n' % (
date,
load.production_id.name,
load.product_id.id,
load.product_id.default_code or '',
load.product_qty,
date > date_limit.get(product, date_limit['product']),
))
# Update medium in product:
self.update_product_medium_from_dict(
cr, uid, product_medium, stock_level_days,
product_obsolete, # manage obsolete in this function,
context=context)
# Call original method for raw materials:
return super(MrpProductionWorkcenterLineOverride, self).\
update_product_level_from_production(cr, uid, ids, context=context)
|
# coding: utf-8
# from __future__ import division
import pandas as pd
import os
from os.path import join, abspath, normpath, dirname, split
import numpy as np
from util.utils import getParentDir, rename_cols
import json
def add_datetime(df, year='year', month='month'):
df['datetime'] = pd.to_datetime(df[year].astype(str) + '-' +
df[month].astype(str),
format='%Y-%m')
def add_quarter(df, year='year', month='month'):
add_datetime(df, year, month)
df['quarter'] = df['datetime'].dt.quarter
def facility_emission_gen(eia_facility, epa, state_fuel_cat,
custom_fuel_cat, export_state_cats=False):
"""
Use EIA and EPA data to compile emissions, generation and fuel consumption
reported by facilities into emissions intensity and generation by fuel
category. Only facilities from the region of interest should be passed to
this function.
inputs:
eia_facility: (dataframe) monthly generation and fuel consumption as
reported by facilities to EIA
epa: (dataframe) monthly co2 emissions and gross generation as reported
by facilities to EPA
state_fuel_cat (dict): match of state-level fuel categories to facility
level categories
custom_fuel_cat (dict): match of custom fuel categories for final
results to the state-level categories
export_state_cats (boolean): If co2 and gen should be exported at the
state category level
output:
co2: total adjusted co2 emissions
gen_fuels: generation and fuel consumption
"""
# Make column names consistent
print('Renaming columns')
rename_cols(eia_facility)
rename_cols(epa)
print('Grouping facilities')
eia_grouped = group_facility_data(eia_facility)
print('Adjusting EPA emissions')
epa_adj = adjust_epa_emissions(epa, eia_grouped)
# I need to return co2 (by facility, or just per month?) and gen/fuels by
# fuel type.
print('Caculating CO2')
co2 = facility_co2(epa_adj, eia_grouped)
co2 = co2.loc[:, ['year', 'month', 'plant id', 'final co2 (kg)']]
print('Gen/fuels to state categories')
gen_fuels_state = group_fuel_cats(eia_facility, state_fuel_cat)
if export_state_cats:
return co2, gen_fuels_state
else:
print('Gen/fuels to custom categories')
gen_fuels_custom = group_fuel_cats(gen_fuels_state,
custom_fuel_cat,
fuel_col='type',
new_col='fuel category')
return co2, gen_fuels_custom
def group_facility_data(eia):
"""
Group facility co2 emissions and generation data by plant id and calculate co2 ratio (elec/total)
inputs:
eia (df): data from EIA bulk download, including calculated co2
emissions (all total/fossil, elec total/fossil)
outputs:
grouped_df (df): grouped df with co2 emissions, generation, and a ratio
of co2 from electric fossil fuels to all total (fossil+bio) fuels
"""
cols = ['all fuel fossil co2 (kg)', 'elec fuel fossil co2 (kg)',
'all fuel total co2 (kg)', 'elec fuel total co2 (kg)',
'generation (mwh)']
grouped_df = eia.groupby(['year', 'month', 'plant id'])[cols].sum()
grouped_df.reset_index(inplace=True)
grouped_df['co2 ratio'] = (grouped_df['elec fuel fossil co2 (kg)']
/ grouped_df['all fuel total co2 (kg)'])
grouped_df['co2 ratio'].fillna(0, inplace=True)
return grouped_df
def adjust_epa_emissions(epa, eia_grouped):
"""
Merge 2 dataframes and calculate an adjusted co2 emission for each facility.
This adjusted value accounts for CHP and biomass emissions using calculated
co2 emissions from fuel consumption.
inputs:
epa (df): monthly co2 emissions from each facility
eia_facility (df): grouped EIA facility data
outputs:
epa_adj (df):
"""
eia_keep = ['month', 'year', 'all fuel total co2 (kg)',
'co2 ratio', 'plant id']
epa_adj = epa.merge(eia_grouped[eia_keep],
on=['plant id', 'year', 'month'], how='inner')
# epa_adj.drop(['month', 'year', 'plant id'], axis=1, inplace=True)
epa_adj['epa index'] = (epa_adj.loc[:, 'co2_mass (kg)'] /
epa_adj.loc[:, 'gload (mw)'])
# Start the adjusted co2 column with unadjusted value
epa_adj['adj co2 (kg)'] = epa_adj.loc[:, 'co2_mass (kg)']
# If CEMS reported co2 emissions are 0 but heat inputs are >0 and
# calculated co2 emissions are >0, change the adjusted co2 to NaN. These
# NaN values will be replaced by the calculated value later. Do the same
# for low index records (<300 g/kWh). If there is a valid co2 ratio,
# multiply the adjusted co2 column by the co2 ratio.
epa_adj.loc[~(epa_adj['co2_mass (kg)'] > 0) &
(epa_adj['heat_input (mmbtu)'] > 0) &
(epa_adj['all fuel total co2 (kg)'] > 0),
'adj co2 (kg)'] = np.nan
epa_adj.loc[(epa_adj['epa index'] < 300) &
(epa_adj['heat_input (mmbtu)'] > 0) &
(epa_adj['all fuel total co2 (kg)'] > 0),
'adj co2 (kg)'] = np.nan
epa_adj.loc[epa_adj['co2 ratio'].notnull(),
'adj co2 (kg)'] *= (epa_adj.loc[epa_adj['co2 ratio'].notnull(),
'co2 ratio'])
return epa_adj
def facility_co2(epa_adj, eia_facility):
"""
Merge the plant-level adjusted epa co2 emissions with generation. Create a
new column of final co2 emissions for each plant. Use calculated values from
eia fuel use where epa data don't exist.
inputs:
epa_adj (df): Reported EPA co2 emissions for each facility by month,
with a column for adjusted emissions
eia_facility (df): Fuel consumption, emissions, and generation by
facility
outputs:
df: merged dataframe with a "final co2 (kg)" column
"""
merge_on = ['plant id', 'year', 'month']
df = eia_facility.merge(epa_adj, on=merge_on, how='left')
# keep the adjusted co2 column, but make a copy for final co2
df['final co2 (kg)'] = df.loc[:, 'adj co2 (kg)']
# Use calculated elec fossil co2 where adjusted epa values don't exist
df.loc[df['final co2 (kg)'].isnull(),
'final co2 (kg)'] = df.loc[df['final co2 (kg)'].isnull(),
'elec fuel fossil co2 (kg)']
return df
def group_fuel_cats(df, fuel_cats, fuel_col='fuel', new_col='type',
extra_group_cols=[]):
"""
Group fuels according to the fuel_cats dictionary inplace
"""
for key, values in fuel_cats.items():
df.loc[df[fuel_col].isin(values), new_col] = key
group_cols = [new_col, 'year', 'month'] + extra_group_cols
keep_cols = [new_col, 'year', 'month', 'total fuel (mmbtu)',
'generation (mwh)', 'elec fuel (mmbtu)',
'all fuel fossil co2 (kg)', 'elec fuel fossil co2 (kg)',
'all fuel total co2 (kg)', 'elec fuel total co2 (kg)']
# add plant id back in if it was in the original df
if 'plant id' in df.columns:
group_cols += ['plant id']
keep_cols += ['plant id']
df_grouped = df.groupby(group_cols).sum()
df_grouped.reset_index(inplace=True)
return df_grouped
def extra_emissions_gen(facility_gen_fuels, eia_total, ef):
"""
Augment facility data with EIA estimates of non-reporting facilities. This
information is only available at the state level.
inputs:
facility_gen_fuels: (dataframe) generation, and fuel consumption at
facilities.
eia_total: (dataframe) total generation and fuel consumption from all
facilities (including non-reporting), by state
ef: (dataframe) emission factors for fuel consumption
output:
state_gen_fuels: generation and fuel consumption from non-reporting
facilities
state_co2: co2 emissions from non-reporting facilities
"""
# rename columns in dataframe (all lowercase)
rename_cols(eia_total)
# make sure both dataframes have a 'type' column and the fuel types in
# facilities are the same as those in the eia total data.
assert 'type' in facility_gen_fuels.columns
assert 'type' in eia_total.columns
facility_fuel_cats = facility_gen_fuels['type'].unique()
total_fuel_cats = eia_total['type'].unique()
for fuel in facility_fuel_cats:
assert fuel in total_fuel_cats
# Only keep unique fuel codes - e.g. total solar includes SUN and DPV
keep_types = [u'WWW', u'WND', u'WAS', u'SUN', 'DPV', u'NUC', u'NG',
u'PEL', u'PC', u'OTH', u'COW', u'OOG', u'HPS', u'HYC', u'GEO']
keep_cols = ['generation (mwh)', 'total fuel (mmbtu)', 'elec fuel (mmbtu)',
'all fuel co2 (kg)', 'elec fuel co2 (kg)']
eia_total_monthly = (eia_total.loc[(eia_total['type'].isin(keep_types))]
.groupby(['type', 'year', 'month'])[keep_cols]
.sum())
# Set up a MultiIndex. Useful when subtracting facility from total data
years = facility_gen_fuels.year.unique()
months = facility_gen_fuels.month.unique()
iterables = [total_fuel_cats, years, months]
index = pd.MultiIndex.from_product(iterables=iterables,
names=['type', 'year', 'month'])
# eia_extra will be the difference between total and facility
eia_extra = pd.DataFrame(index=index, columns=['total fuel (mmbtu)',
'generation (mwh)',
'elec fuel (mmbtu)'])
# give gen_fuels a MultiIndex
gen_fuels = facility_gen_fuels.groupby(['type', 'year', 'month']).sum()
# will need the IndexSlice to reference into the MultiIndex
idx = pd.IndexSlice
use_columns=['total fuel (mmbtu)', 'generation (mwh)', 'elec fuel (mmbtu)']
eia_extra = (eia_total_monthly.loc[:, use_columns] -
gen_fuels.loc[:, use_columns])
# I have lumped hydro pumped storage in with conventional hydro in the
# facility data. Because of this, I need to add HPS rows so that the totals
# will add up correctly. Also need to add DPV because it won't show up
# otherwise (not in both dataframes)
eia_extra.loc[idx[['HPS', 'DPV'],:,:],
use_columns] = (eia_total_monthly
.loc[idx[['HPS', 'DPV'],:,:], use_columns])
# consolidate emission factors to match the state-level fuel codes
fuel_factors = reduce_emission_factors(ef)
# Calculate co2 emissions for the state-level fuel categories
eia_extra['all fuel co2 (kg)'] = 0
eia_extra['elec fuel co2 (kg)'] = 0
fuels = [fuel for fuel in total_fuel_cats
if fuel in fuel_factors.keys()]
for fuel in fuels:
eia_extra.loc[idx[fuel,:,:],'all fuel co2 (kg)'] = \
eia_extra.loc[idx[fuel,:,:],'total fuel (mmbtu)'] * fuel_factors[fuel]
eia_extra.loc[idx[fuel,:,:],'elec fuel co2 (kg)'] = \
eia_extra.loc[idx[fuel,:,:],'elec fuel (mmbtu)'] * fuel_factors[fuel]
extra_co2 = (eia_extra.groupby(level=['type', 'year', 'month'])
['all fuel co2 (kg)', 'elec fuel co2 (kg)']
.sum())
extra_gen_fuel = (eia_extra
.drop(['all fuel co2 (kg)', 'elec fuel co2 (kg)'],
axis=1))
return extra_co2, extra_gen_fuel
def reduce_emission_factors(ef, custom_reduce=None):
"""
Reduce the standard fuel emission factors
inputs:
ef (df): emission factors (kg/mmbtu) for every possible fuel
custom_reduce (dict): a custom dictionary to combine fuels. If None,
use the default
"""
# make sure the fuel codes are in the index
assert 'NG' in ef.index
if not custom_reduce:
fuel_factors = {'NG' : ef.loc['NG', 'Fossil Factor'],
'PEL': ef.loc[['DFO', 'RFO'], 'Fossil Factor'].mean(),
'PC' : ef.loc['PC', 'Fossil Factor'],
'COW' : ef.loc[['BIT', 'SUB'], 'Fossil Factor'].mean(),
'OOG' : ef.loc['OG', 'Fossil Factor']}
else:
fuel_factors = custom_reduce
return fuel_factors
def total_gen(df_list, fuel_col='fuel category'):
pass
def co2_calc(fuel, ef):
"""
Calculate co2 emissions based on fuel consumption using emission factors
from EIA/EPA and IPCC
inputs:
fuel: (dataframe) should have columns with the fuel type, total
consumption, and consumption for electricity generation
ef: (dataframe) emission factors (kg co2/mmbtu) for each fuel
output:
dataframe: co2 emissions from fuel consumption (total and for
electricity) at each facility
"""
return co2
# def add_nerc_data()
Change to .subtract()
# coding: utf-8
# from __future__ import division
import pandas as pd
import os
from os.path import join, abspath, normpath, dirname, split
import numpy as np
from util.utils import getParentDir, rename_cols
import json
def add_datetime(df, year='year', month='month'):
df['datetime'] = pd.to_datetime(df[year].astype(str) + '-' +
df[month].astype(str),
format='%Y-%m')
def add_quarter(df, year='year', month='month'):
add_datetime(df, year, month)
df['quarter'] = df['datetime'].dt.quarter
def facility_emission_gen(eia_facility, epa, state_fuel_cat,
custom_fuel_cat, export_state_cats=False):
"""
Use EIA and EPA data to compile emissions, generation and fuel consumption
reported by facilities into emissions intensity and generation by fuel
category. Only facilities from the region of interest should be passed to
this function.
inputs:
eia_facility: (dataframe) monthly generation and fuel consumption as
reported by facilities to EIA
epa: (dataframe) monthly co2 emissions and gross generation as reported
by facilities to EPA
state_fuel_cat (dict): match of state-level fuel categories to facility
level categories
custom_fuel_cat (dict): match of custom fuel categories for final
results to the state-level categories
export_state_cats (boolean): If co2 and gen should be exported at the
state category level
output:
co2: total adjusted co2 emissions
gen_fuels: generation and fuel consumption
"""
# Make column names consistent
print('Renaming columns')
rename_cols(eia_facility)
rename_cols(epa)
print('Grouping facilities')
eia_grouped = group_facility_data(eia_facility)
print('Adjusting EPA emissions')
epa_adj = adjust_epa_emissions(epa, eia_grouped)
# I need to return co2 (by facility, or just per month?) and gen/fuels by
# fuel type.
print('Caculating CO2')
co2 = facility_co2(epa_adj, eia_grouped)
co2 = co2.loc[:, ['year', 'month', 'plant id', 'final co2 (kg)']]
print('Gen/fuels to state categories')
gen_fuels_state = group_fuel_cats(eia_facility, state_fuel_cat)
if export_state_cats:
return co2, gen_fuels_state
else:
print('Gen/fuels to custom categories')
gen_fuels_custom = group_fuel_cats(gen_fuels_state,
custom_fuel_cat,
fuel_col='type',
new_col='fuel category')
return co2, gen_fuels_custom
def group_facility_data(eia):
"""
Group facility co2 emissions and generation data by plant id and calculate co2 ratio (elec/total)
inputs:
eia (df): data from EIA bulk download, including calculated co2
emissions (all total/fossil, elec total/fossil)
outputs:
grouped_df (df): grouped df with co2 emissions, generation, and a ratio
of co2 from electric fossil fuels to all total (fossil+bio) fuels
"""
cols = ['all fuel fossil co2 (kg)', 'elec fuel fossil co2 (kg)',
'all fuel total co2 (kg)', 'elec fuel total co2 (kg)',
'generation (mwh)']
grouped_df = eia.groupby(['year', 'month', 'plant id'])[cols].sum()
grouped_df.reset_index(inplace=True)
grouped_df['co2 ratio'] = (grouped_df['elec fuel fossil co2 (kg)']
/ grouped_df['all fuel total co2 (kg)'])
grouped_df['co2 ratio'].fillna(0, inplace=True)
return grouped_df
def adjust_epa_emissions(epa, eia_grouped):
"""
Merge 2 dataframes and calculate an adjusted co2 emission for each facility.
This adjusted value accounts for CHP and biomass emissions using calculated
co2 emissions from fuel consumption.
inputs:
epa (df): monthly co2 emissions from each facility
eia_facility (df): grouped EIA facility data
outputs:
epa_adj (df):
"""
eia_keep = ['month', 'year', 'all fuel total co2 (kg)',
'co2 ratio', 'plant id']
epa_adj = epa.merge(eia_grouped[eia_keep],
on=['plant id', 'year', 'month'], how='inner')
# epa_adj.drop(['month', 'year', 'plant id'], axis=1, inplace=True)
epa_adj['epa index'] = (epa_adj.loc[:, 'co2_mass (kg)'] /
epa_adj.loc[:, 'gload (mw)'])
# Start the adjusted co2 column with unadjusted value
epa_adj['adj co2 (kg)'] = epa_adj.loc[:, 'co2_mass (kg)']
# If CEMS reported co2 emissions are 0 but heat inputs are >0 and
# calculated co2 emissions are >0, change the adjusted co2 to NaN. These
# NaN values will be replaced by the calculated value later. Do the same
# for low index records (<300 g/kWh). If there is a valid co2 ratio,
# multiply the adjusted co2 column by the co2 ratio.
epa_adj.loc[~(epa_adj['co2_mass (kg)'] > 0) &
(epa_adj['heat_input (mmbtu)'] > 0) &
(epa_adj['all fuel total co2 (kg)'] > 0),
'adj co2 (kg)'] = np.nan
epa_adj.loc[(epa_adj['epa index'] < 300) &
(epa_adj['heat_input (mmbtu)'] > 0) &
(epa_adj['all fuel total co2 (kg)'] > 0),
'adj co2 (kg)'] = np.nan
epa_adj.loc[epa_adj['co2 ratio'].notnull(),
'adj co2 (kg)'] *= (epa_adj.loc[epa_adj['co2 ratio'].notnull(),
'co2 ratio'])
return epa_adj
def facility_co2(epa_adj, eia_facility):
"""
Merge the plant-level adjusted epa co2 emissions with generation. Create a
new column of final co2 emissions for each plant. Use calculated values from
eia fuel use where epa data don't exist.
inputs:
epa_adj (df): Reported EPA co2 emissions for each facility by month,
with a column for adjusted emissions
eia_facility (df): Fuel consumption, emissions, and generation by
facility
outputs:
df: merged dataframe with a "final co2 (kg)" column
"""
merge_on = ['plant id', 'year', 'month']
df = eia_facility.merge(epa_adj, on=merge_on, how='left')
# keep the adjusted co2 column, but make a copy for final co2
df['final co2 (kg)'] = df.loc[:, 'adj co2 (kg)']
# Use calculated elec fossil co2 where adjusted epa values don't exist
df.loc[df['final co2 (kg)'].isnull(),
'final co2 (kg)'] = df.loc[df['final co2 (kg)'].isnull(),
'elec fuel fossil co2 (kg)']
return df
def group_fuel_cats(df, fuel_cats, fuel_col='fuel', new_col='type',
extra_group_cols=[]):
"""
Group fuels according to the fuel_cats dictionary inplace
"""
for key, values in fuel_cats.items():
df.loc[df[fuel_col].isin(values), new_col] = key
group_cols = [new_col, 'year', 'month'] + extra_group_cols
keep_cols = [new_col, 'year', 'month', 'total fuel (mmbtu)',
'generation (mwh)', 'elec fuel (mmbtu)',
'all fuel fossil co2 (kg)', 'elec fuel fossil co2 (kg)',
'all fuel total co2 (kg)', 'elec fuel total co2 (kg)']
# add plant id back in if it was in the original df
if 'plant id' in df.columns:
group_cols += ['plant id']
keep_cols += ['plant id']
df_grouped = df.groupby(group_cols).sum()
df_grouped.reset_index(inplace=True)
return df_grouped
def extra_emissions_gen(facility_gen_fuels, eia_total, ef):
"""
Augment facility data with EIA estimates of non-reporting facilities. This
information is only available at the state level.
inputs:
facility_gen_fuels: (dataframe) generation, and fuel consumption at
facilities.
eia_total: (dataframe) total generation and fuel consumption from all
facilities (including non-reporting), by state
ef: (dataframe) emission factors for fuel consumption
output:
state_gen_fuels: generation and fuel consumption from non-reporting
facilities
state_co2: co2 emissions from non-reporting facilities
"""
# rename columns in dataframe (all lowercase)
rename_cols(eia_total)
# make sure both dataframes have a 'type' column and the fuel types in
# facilities are the same as those in the eia total data.
assert 'type' in facility_gen_fuels.columns
assert 'type' in eia_total.columns
facility_fuel_cats = facility_gen_fuels['type'].unique()
total_fuel_cats = eia_total['type'].unique()
for fuel in facility_fuel_cats:
assert fuel in total_fuel_cats
# Only keep unique fuel codes - e.g. total solar includes SUN and DPV
keep_types = [u'WWW', u'WND', u'WAS', u'SUN', 'DPV', u'NUC', u'NG',
u'PEL', u'PC', u'OTH', u'COW', u'OOG', u'HPS', u'HYC', u'GEO']
keep_cols = ['generation (mwh)', 'total fuel (mmbtu)', 'elec fuel (mmbtu)',
'all fuel co2 (kg)', 'elec fuel co2 (kg)']
eia_total_monthly = (eia_total.loc[(eia_total['type'].isin(keep_types))]
.groupby(['type', 'year', 'month'])[keep_cols]
.sum())
# Set up a MultiIndex. Useful when subtracting facility from total data
years = facility_gen_fuels.year.unique()
months = facility_gen_fuels.month.unique()
iterables = [total_fuel_cats, years, months]
index = pd.MultiIndex.from_product(iterables=iterables,
names=['type', 'year', 'month'])
# eia_extra will be the difference between total and facility
eia_extra = pd.DataFrame(index=index, columns=['total fuel (mmbtu)',
'generation (mwh)',
'elec fuel (mmbtu)'])
# give gen_fuels a MultiIndex
gen_fuels = facility_gen_fuels.groupby(['type', 'year', 'month']).sum()
# will need the IndexSlice to reference into the MultiIndex
idx = pd.IndexSlice
# Need to use .subtract() here because of NaN values
use_columns=['total fuel (mmbtu)', 'generation (mwh)', 'elec fuel (mmbtu)']
eia_extra = (eia_total_monthly.loc[:, use_columns]
.subtract(gen_fuels.loc[:, use_columns], fill_value=0))
# I have lumped hydro pumped storage in with conventional hydro in the
# facility data. Because of this, I need to add HPS rows so that the totals
# will add up correctly. Also need to add DPV because it won't show up
# otherwise (not in both dataframes)
eia_extra.loc[idx[['HPS', 'DPV'],:,:],
use_columns] = (eia_total_monthly
.loc[idx[['HPS', 'DPV'],:,:], use_columns])
# consolidate emission factors to match the state-level fuel codes
fuel_factors = reduce_emission_factors(ef)
# Calculate co2 emissions for the state-level fuel categories
eia_extra['all fuel co2 (kg)'] = 0
eia_extra['elec fuel co2 (kg)'] = 0
fuels = [fuel for fuel in total_fuel_cats
if fuel in fuel_factors.keys()]
for fuel in fuels:
eia_extra.loc[idx[fuel,:,:],'all fuel co2 (kg)'] = \
eia_extra.loc[idx[fuel,:,:],'total fuel (mmbtu)'] * fuel_factors[fuel]
eia_extra.loc[idx[fuel,:,:],'elec fuel co2 (kg)'] = \
eia_extra.loc[idx[fuel,:,:],'elec fuel (mmbtu)'] * fuel_factors[fuel]
extra_co2 = (eia_extra.groupby(level=['type', 'year', 'month'])
['all fuel co2 (kg)', 'elec fuel co2 (kg)']
.sum())
extra_gen_fuel = (eia_extra
.drop(['all fuel co2 (kg)', 'elec fuel co2 (kg)'],
axis=1))
return extra_co2, extra_gen_fuel
def reduce_emission_factors(ef, custom_reduce=None):
"""
Reduce the standard fuel emission factors
inputs:
ef (df): emission factors (kg/mmbtu) for every possible fuel
custom_reduce (dict): a custom dictionary to combine fuels. If None,
use the default
"""
# make sure the fuel codes are in the index
assert 'NG' in ef.index
if not custom_reduce:
fuel_factors = {'NG' : ef.loc['NG', 'Fossil Factor'],
'PEL': ef.loc[['DFO', 'RFO'], 'Fossil Factor'].mean(),
'PC' : ef.loc['PC', 'Fossil Factor'],
'COW' : ef.loc[['BIT', 'SUB'], 'Fossil Factor'].mean(),
'OOG' : ef.loc['OG', 'Fossil Factor']}
else:
fuel_factors = custom_reduce
return fuel_factors
def total_gen(df_list, fuel_col='fuel category'):
pass
def co2_calc(fuel, ef):
"""
Calculate co2 emissions based on fuel consumption using emission factors
from EIA/EPA and IPCC
inputs:
fuel: (dataframe) should have columns with the fuel type, total
consumption, and consumption for electricity generation
ef: (dataframe) emission factors (kg co2/mmbtu) for each fuel
output:
dataframe: co2 emissions from fuel consumption (total and for
electricity) at each facility
"""
return co2
# def add_nerc_data()
|
# -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Julius Network Solutions SARL <contact@julius.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from datetime import datetime
from osv import fields, osv
from tools.translate import _
import netsvc
class one2many_special(fields.one2many):
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if not values:
values = {}
res = {}
location_ids = []
for id in ids:
res[id] = []
location_id = obj.pool.get('stock.tracking').read(cr, user, id, ['location_id'])['location_id']
if location_id and location_id[0] and location_id[0] not in location_ids:
location_ids.append(location_id[0])
ids2 = obj.pool.get(self._obj).search(cr, user, self._domain + [(self._fields_id, 'in', ids), ('location_dest_id', 'in', location_ids)], limit=self._limit)
for r in obj.pool.get(self._obj)._read_flat(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
res[r[self._fields_id]].append( r['id'] )
return res
class stock_tracking(osv.osv):
_inherit = 'stock.tracking'
def hierarchy_ids(self, tracking):
result_list = [tracking]
for child in tracking.child_ids:
result_list.extend(self.hierarchy_ids(child))
return result_list
def _get_child_products(self, cr, uid, ids, field_name, arg, context=None):
packs = self.browse(cr, uid, ids)
res = {}
for pack in packs:
res[pack.id] = []
childs = self.hierarchy_ids(pack)
for child in childs:
for prod in child.product_ids:
res[pack.id].append(prod.id)
return res
def _get_child_serials(self, cr, uid, ids, field_name, arg, context=None):
packs = self.browse(cr, uid, ids)
res = {}
for pack in packs:
res[pack.id] = []
childs = self.hierarchy_ids(pack)
for child in childs:
for serial in child.serial_ids:
res[pack.id].append(serial.id)
return res
_columns = {
'parent_id': fields.many2one('stock.tracking', 'Parent'),
'child_ids': fields.one2many('stock.tracking', 'parent_id', 'Children'),
'ul_id': fields.many2one('product.ul', 'Logistic unit', readonly=True, states={'open':[('readonly',False)]}),
'location_id': fields.many2one('stock.location', 'Location', required=True, readonly=True, states={'open':[('readonly',False)]}),
'state': fields.selection([('open','Open'),('close','Close')], 'State', readonly=True),
'product_ids': fields.one2many('product.stock.tracking', 'tracking_id', 'Products', readonly=True, states={'open':[('readonly',False)]}),
'child_product_ids': fields.function(_get_child_products, method=True, type='one2many', obj='product.stock.tracking', string='Child Products'),
'history_ids': fields.one2many('stock.tracking.history', 'tracking_id', 'History'),
'current_move_ids': one2many_special('stock.move', 'tracking_id', 'Current moves', domain=[('pack_history_id', '=', False)], readonly=True),
'name': fields.char('Pack Reference', size=64, required=True, readonly=True, states={'open':[('readonly',False)]}),
'date': fields.datetime('Creation Date', required=True, readonly=True, states={'open':[('readonly',False)]}),
'serial_ids': fields.one2many('serial.stock.tracking', 'tracking_id', 'Products', readonly=True, states={'open':[('readonly',False)]}),
'child_serial_ids': fields.function(_get_child_serials, method=True, type='one2many', obj='serial.stock.tracking', string='Child Serials'),
}
def _check_parent_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
if lines[0].parent_id:
if lines[0].ul_id.capacity_index > lines[0].parent_id.ul_id.capacity_index:
return False
return True
_constraints = [(_check_parent_id, 'Bad parent type selection. Please try again.',['parent_id'] ),]
_defaults = {
'state': 'open',
}
def reset_open(self, cr, uid, ids, context=None):
pack_ids = self.browse(cr, uid, ids, context)
for pack in pack_ids:
allowed = True
if pack.parent_id:
if pack.parent_id and pack.parent_id != 'open':
self.write(cr, uid, [pack.parent_id.id], {'state': 'open'})
# allowed = False
# raise osv.except_osv(_('Not allowed !'),_('You can\'t re-open this pack because the parent pack is close'))
if allowed:
for child in pack.child_ids:
if child.state == 'open':
allowed = False
raise osv.except_osv(_('Not allowed !'),_('You can\'t re-open this pack because there is at least one not closed child'))
break
if allowed:
self.write(cr, uid, [pack.id], {'state': 'open'})
return True
def set_close(self, cr, uid, ids, context=None):
pack_ids = self.browse(cr, uid, ids, context)
for pack in pack_ids:
allowed = True
for child in pack.child_ids:
if child.state == 'open':
allowed = False
raise osv.except_osv(_('Not allowed !'),_('You can\'t close this pack because there is at least one not closed child'))
break
# if allowed:
# self.write(cr, uid, [pack.id], {'state': 'close'})
return True
def get_products(self, cr, uid, ids, context=None):
pack_ids = self.browse(cr, uid, ids, context)
stock_track = self.pool.get('product.stock.tracking')
for pack in pack_ids:
childs = self.hierarchy_ids(pack)
for child in childs:
product_ids = [x.id for x in child.product_ids]
stock_track.unlink(cr, uid, product_ids)
product_list = {}
for x in child.current_move_ids:
if x.location_dest_id.id == child.location_id.id:
if x.product_id.id not in product_list.keys():
product_list.update({x.product_id.id:x.product_qty})
else:
product_list[x.product_id.id] += x.product_qty
for product in product_list.keys():
stock_track.create(cr, uid, {'product_id': product, 'quantity': product_list[product], 'tracking_id': child.id})
return True
def get_serials(self, cr, uid, ids, context=None):
pack_ids = self.browse(cr, uid, ids, context)
serial_track = self.pool.get('serial.stock.tracking')
serial_obj = self.pool.get('stock.production.lot')
for pack in pack_ids:
childs = self.hierarchy_ids(pack)
for child in childs:
serial_ids = [x.id for x in child.serial_ids]
serial_track.unlink(cr, uid, serial_ids)
serial_list = {}
for x in child.current_move_ids:
if x.location_dest_id.id == child.location_id.id:
if x.prodlot_id.id not in serial_list.keys():
serial_list.update({x.prodlot_id.id:x.product_qty})
else:
serial_list[x.prodlot_id.id] += x.product_qty
for serial in serial_list.keys():
if serial:
serial_track.create(cr, uid, {'serial_id': serial, 'quantity': serial_list[serial], 'tracking_id': child.id})
serial_obj.write(cr, uid, [serial], {'tracking_id': child.id})
return True
stock_tracking()
class product_ul(osv.osv):
_inherit = "product.ul"
_description = "Shipping Unit"
_columns = {
'capacity_index': fields.integer('Capacity index'),
}
_order = 'capacity_index'
product_ul()
class product_stock_tracking(osv.osv):
_name = 'product.stock.tracking'
_columns = {
'product_id': fields.many2one('product.product', 'Product'),
'quantity': fields.float('Quantity'),
'tracking_id': fields.many2one('stock.tracking', 'Tracking'),
# 'tracking_history_id': fields.many2one('stock.tracking.history', 'Tracking History'),
}
product_stock_tracking()
class serial_stock_tracking(osv.osv):
_name = 'serial.stock.tracking'
_order = 'tracking_id,serial_id'
_columns = {
'serial_id': fields.many2one('stock.production.lot', 'Serial'),
'product_id': fields.related('serial_id', 'product_id', type='many2one', relation='product.product', string='Product'),
'quantity': fields.float('Quantity'),
'tracking_id': fields.many2one('stock.tracking', 'Tracking'),
# 'tracking_history_id': fields.many2one('stock.tracking.history', 'Tracking History'),
}
serial_stock_tracking()
class stock_tracking_history(osv.osv):
_name = "stock.tracking.history"
def _get_types(self, cr, uid, context={}):
# res = [('pack_in','Add parent'),('pack_out','Unlink parent'),('move','Move')]
res = []
return res
# def hierarchy_ids(self, tracking):
# result_list = [tracking]
# for child in tracking.child_ids:
# result_list.extend(self.hierarchy_ids(child))
# return result_list
#
# def _get_child_products(self, cr, uid, ids, field_name, arg, context=None):
# packs = self.browse(cr, uid, ids)
# res = {}
# for pack in packs:
# res[pack.id] = []
# childs = self.hierarchy_ids(pack)
# for child in childs:
# for prod in child.product_ids:
# res[pack.id].append(prod.id)
# return res
_columns = {
'tracking_id': fields.many2one('stock.tracking', 'Pack', required=True),
'type': fields.selection(_get_types, 'Type'),
# 'product_ids': fields.one2many('product.stock.tracking', 'tracking_history_id', 'Products'),
# 'child_product_ids': fields.function(_get_child_products, method=True, type='one2many', obj='product.stock.tracking', string='Child Products'),
# 'parent_hist_id': fields.many2one('stock.tracking.history', 'Parent history pack'),
# 'child_ids': fields.one2many('stock.tracking.history', 'parent_hist_id', 'Child history pack'),
}
_rec_name = "tracking_id"
stock_tracking_history()
'''Add a field in order to store the current pack in a production lot'''
class stock_production_lot(osv.osv):
_inherit = 'stock.production.lot'
_columns = {
'tracking_id': fields.many2one('stock.tracking', 'pack'),
}
stock_production_lot()
class product_category(osv.osv):
_inherit = 'product.category'
_columns = {
'tracked': fields.boolean('Need a serial code ?'),
}
product_category()
class stock_inventory(osv.osv):
_inherit = 'stock.inventory'
_defaults = {
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.inventory') or '/'
}
stock_inventory()
class stock_move(osv.osv):
_inherit = 'stock.move'
_columns = {
# 'cancel_cascade': fields.boolean('Cancel Cascade', help='If checked, when this move is cancelled, cancel the linked move too')
}
def create(self, cr, uid, vals, context=None):
production_lot_obj = self.pool.get('stock.production.lot')
stock_tracking_obj = self.pool.get('stock.tracking')
if vals.get('prodlot_id',False):
production_lot_data = production_lot_obj.browse(cr, uid, vals['prodlot_id'])
last_production_lot_move_id = self.search(cr, uid, [('prodlot_id', '=', production_lot_data.id)], limit=1, order='date')
if last_production_lot_move_id:
last_production_lot_move = self.browse(cr,uid,last_production_lot_move_id[0])
if last_production_lot_move.tracking_id:
ids = [last_production_lot_move.tracking_id.id]
stock_tracking_obj.reset_open(cr, uid, ids, context=None)
return super(stock_move,self).create(cr, uid, vals, context)
stock_move()
class split_in_production_lot(osv.osv_memory):
_inherit = "stock.move.split"
_columns = {
'use_exist' : fields.boolean('Existing Lots', invisible=True),
}
_defaults = {
'use_exist': lambda *a: True,
}
def default_get(self, cr, uid, fields, context=None):
res = super(split_in_production_lot, self).default_get(cr, uid, fields, context)
res.update({'use_exist': True})
return res
split_in_production_lot()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
internediate saved version for voltalis
# -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Julius Network Solutions SARL <contact@julius.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from datetime import datetime
from osv import fields, osv
from tools.translate import _
import netsvc
class one2many_special(fields.one2many):
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if not values:
values = {}
res = {}
location_ids = []
for id in ids:
res[id] = []
location_id = obj.pool.get('stock.tracking').read(cr, user, id, ['location_id'])['location_id']
if location_id and location_id[0] and location_id[0] not in location_ids:
location_ids.append(location_id[0])
ids2 = obj.pool.get(self._obj).search(cr, user, self._domain + [(self._fields_id, 'in', ids), ('location_dest_id', 'in', location_ids)], limit=self._limit)
for r in obj.pool.get(self._obj)._read_flat(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
res[r[self._fields_id]].append( r['id'] )
return res
class stock_tracking(osv.osv):
_inherit = 'stock.tracking'
def hierarchy_ids(self, tracking):
result_list = [tracking]
for child in tracking.child_ids:
result_list.extend(self.hierarchy_ids(child))
return result_list
def _get_child_products(self, cr, uid, ids, field_name, arg, context=None):
packs = self.browse(cr, uid, ids)
res = {}
for pack in packs:
res[pack.id] = []
childs = self.hierarchy_ids(pack)
for child in childs:
for prod in child.product_ids:
res[pack.id].append(prod.id)
return res
def _get_child_serials(self, cr, uid, ids, field_name, arg, context=None):
packs = self.browse(cr, uid, ids)
res = {}
for pack in packs:
res[pack.id] = []
childs = self.hierarchy_ids(pack)
for child in childs:
for serial in child.serial_ids:
res[pack.id].append(serial.id)
return res
_columns = {
'parent_id': fields.many2one('stock.tracking', 'Parent'),
'child_ids': fields.one2many('stock.tracking', 'parent_id', 'Children'),
'ul_id': fields.many2one('product.ul', 'Logistic unit', readonly=True, states={'open':[('readonly',False)]}),
'location_id': fields.many2one('stock.location', 'Location', required=True, readonly=True, states={'open':[('readonly',False)]}),
'state': fields.selection([('open','Open'),('close','Close')], 'State', readonly=True),
'product_ids': fields.one2many('product.stock.tracking', 'tracking_id', 'Products', readonly=True, states={'open':[('readonly',False)]}),
'child_product_ids': fields.function(_get_child_products, method=True, type='one2many', obj='product.stock.tracking', string='Child Products'),
'history_ids': fields.one2many('stock.tracking.history', 'tracking_id', 'History'),
'current_move_ids': one2many_special('stock.move', 'tracking_id', 'Current moves', domain=[('pack_history_id', '=', False)], readonly=True),
'name': fields.char('Pack Reference', size=64, required=True, readonly=True, states={'open':[('readonly',False)]}),
'date': fields.datetime('Creation Date', required=True, readonly=True, states={'open':[('readonly',False)]}),
'serial_ids': fields.one2many('serial.stock.tracking', 'tracking_id', 'Products', readonly=True, states={'open':[('readonly',False)]}),
'child_serial_ids': fields.function(_get_child_serials, method=True, type='one2many', obj='serial.stock.tracking', string='Child Serials'),
}
def _check_parent_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
if lines[0].parent_id:
if lines[0].ul_id.capacity_index > lines[0].parent_id.ul_id.capacity_index:
return False
return True
_constraints = [(_check_parent_id, 'Bad parent type selection. Please try again.',['parent_id'] ),]
_defaults = {
'state': 'open',
}
def reset_open(self, cr, uid, ids, context=None):
pack_ids = self.browse(cr, uid, ids, context)
for pack in pack_ids:
allowed = True
if pack.parent_id:
if pack.parent_id and pack.parent_id != 'open':
self.write(cr, uid, [pack.parent_id.id], {'state': 'open'})
# allowed = False
# raise osv.except_osv(_('Not allowed !'),_('You can\'t re-open this pack because the parent pack is close'))
if allowed:
for child in pack.child_ids:
if child.state == 'open':
allowed = False
raise osv.except_osv(_('Not allowed !'),_('You can\'t re-open this pack because there is at least one not closed child'))
break
if allowed:
self.write(cr, uid, [pack.id], {'state': 'open'})
return True
def set_close(self, cr, uid, ids, context=None):
pack_ids = self.browse(cr, uid, ids, context)
for pack in pack_ids:
allowed = True
for child in pack.child_ids:
if child.state == 'open':
allowed = False
raise osv.except_osv(_('Not allowed !'),_('You can\'t close this pack because there is at least one not closed child'))
break
# if allowed:
# self.write(cr, uid, [pack.id], {'state': 'close'})
return True
def get_products(self, cr, uid, ids, context=None):
pack_ids = self.browse(cr, uid, ids, context)
stock_track = self.pool.get('product.stock.tracking')
for pack in pack_ids:
childs = self.hierarchy_ids(pack)
for child in childs:
product_ids = [x.id for x in child.product_ids]
stock_track.unlink(cr, uid, product_ids)
product_list = {}
for x in child.current_move_ids:
if x.location_dest_id.id == child.location_id.id:
if x.product_id.id not in product_list.keys():
product_list.update({x.product_id.id:x.product_qty})
else:
product_list[x.product_id.id] += x.product_qty
for product in product_list.keys():
stock_track.create(cr, uid, {'product_id': product, 'quantity': product_list[product], 'tracking_id': child.id})
return True
def get_serials(self, cr, uid, ids, context=None):
pack_ids = self.browse(cr, uid, ids, context)
serial_track = self.pool.get('serial.stock.tracking')
serial_obj = self.pool.get('stock.production.lot')
for pack in pack_ids:
childs = self.hierarchy_ids(pack)
for child in childs:
serial_ids = [x.id for x in child.serial_ids]
serial_track.unlink(cr, uid, serial_ids)
serial_list = {}
for x in child.current_move_ids:
if x.location_dest_id.id == child.location_id.id:
if x.prodlot_id.id not in serial_list.keys():
serial_list.update({x.prodlot_id.id:x.product_qty})
else:
serial_list[x.prodlot_id.id] += x.product_qty
for serial in serial_list.keys():
if serial:
serial_track.create(cr, uid, {'serial_id': serial, 'quantity': serial_list[serial], 'tracking_id': child.id})
serial_obj.write(cr, uid, [serial], {'tracking_id': child.id})
return True
stock_tracking()
class product_ul(osv.osv):
_inherit = "product.ul"
_description = "Shipping Unit"
_columns = {
'capacity_index': fields.integer('Capacity index'),
}
_order = 'capacity_index'
product_ul()
class product_stock_tracking(osv.osv):
_name = 'product.stock.tracking'
_columns = {
'product_id': fields.many2one('product.product', 'Product'),
'quantity': fields.float('Quantity'),
'tracking_id': fields.many2one('stock.tracking', 'Tracking'),
# 'tracking_history_id': fields.many2one('stock.tracking.history', 'Tracking History'),
}
product_stock_tracking()
class serial_stock_tracking(osv.osv):
_name = 'serial.stock.tracking'
_order = 'tracking_id,serial_id'
_columns = {
'serial_id': fields.many2one('stock.production.lot', 'Serial'),
'product_id': fields.related('serial_id', 'product_id', type='many2one', relation='product.product', string='Product'),
'quantity': fields.float('Quantity'),
'tracking_id': fields.many2one('stock.tracking', 'Tracking'),
# 'tracking_history_id': fields.many2one('stock.tracking.history', 'Tracking History'),
}
serial_stock_tracking()
class stock_tracking_history(osv.osv):
_name = "stock.tracking.history"
def _get_types(self, cr, uid, context={}):
# res = [('pack_in','Add parent'),('pack_out','Unlink parent'),('move','Move')]
res = []
return res
# def hierarchy_ids(self, tracking):
# result_list = [tracking]
# for child in tracking.child_ids:
# result_list.extend(self.hierarchy_ids(child))
# return result_list
#
# def _get_child_products(self, cr, uid, ids, field_name, arg, context=None):
# packs = self.browse(cr, uid, ids)
# res = {}
# for pack in packs:
# res[pack.id] = []
# childs = self.hierarchy_ids(pack)
# for child in childs:
# for prod in child.product_ids:
# res[pack.id].append(prod.id)
# return res
_columns = {
'tracking_id': fields.many2one('stock.tracking', 'Pack', required=True),
'type': fields.selection(_get_types, 'Type'),
# 'product_ids': fields.one2many('product.stock.tracking', 'tracking_history_id', 'Products'),
# 'child_product_ids': fields.function(_get_child_products, method=True, type='one2many', obj='product.stock.tracking', string='Child Products'),
# 'parent_hist_id': fields.many2one('stock.tracking.history', 'Parent history pack'),
# 'child_ids': fields.one2many('stock.tracking.history', 'parent_hist_id', 'Child history pack'),
}
_rec_name = "tracking_id"
stock_tracking_history()
'''Add a field in order to store the current pack in a production lot'''
class stock_production_lot(osv.osv):
_inherit = 'stock.production.lot'
_columns = {
'tracking_id': fields.many2one('stock.tracking', 'pack'),
}
stock_production_lot()
class product_category(osv.osv):
_inherit = 'product.category'
_columns = {
'tracked': fields.boolean('Need a serial code ?'),
}
product_category()
class stock_inventory(osv.osv):
_inherit = 'stock.inventory'
_defaults = {
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.inventory') or '/'
}
stock_inventory()
class stock_move(osv.osv):
_inherit = 'stock.move'
_columns = {
'move_ori_id': fields.many2one('stock.move', 'Origin Move', select=True),
# 'cancel_cascade': fields.boolean('Cancel Cascade', help='If checked, when this move is cancelled, cancel the linked move too')
}
def write(self, cr, uid, ids, vals, context=None):
result = super(stock_move,self).write(cr, uid, ids, vals, context)
for id in ids:
state = self.browse(cr, uid, id, context).state
move_ori_id = self.browse(cr, uid, id, context).move_ori_id
if state == 'done' and move_ori_id:
self.write(cr, uid, [move_ori_id], {'state':'done'}, context)
return result
def create(self, cr, uid, vals, context=None):
production_lot_obj = self.pool.get('stock.production.lot')
stock_tracking_obj = self.pool.get('stock.tracking')
if vals.get('prodlot_id',False):
production_lot_data = production_lot_obj.browse(cr, uid, vals['prodlot_id'])
last_production_lot_move_id = self.search(cr, uid, [('prodlot_id', '=', production_lot_data.id)], limit=1, order='date')
if last_production_lot_move_id:
last_production_lot_move = self.browse(cr,uid,last_production_lot_move_id[0])
if last_production_lot_move.tracking_id:
ids = [last_production_lot_move.tracking_id.id]
stock_tracking_obj.reset_open(cr, uid, ids, context=None)
return super(stock_move,self).create(cr, uid, vals, context)
stock_move()
class split_in_production_lot(osv.osv_memory):
_inherit = "stock.move.split"
_columns = {
'use_exist' : fields.boolean('Existing Lots', invisible=True),
}
_defaults = {
'use_exist': lambda *a: True,
}
def default_get(self, cr, uid, fields, context=None):
res = super(split_in_production_lot, self).default_get(cr, uid, fields, context)
res.update({'use_exist': True})
return res
split_in_production_lot()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
from __future__ import absolute_import
# stdlib, alphabetical
import cPickle
import logging
# Core Django, alphabetical
from django.db import models
from django.utils.translation import ugettext_lazy as _l
__all__ = ('Async',)
LOGGER = logging.getLogger(__name__)
class Async(models.Model):
""" Stores information about currently running asynchronous tasks. """
completed = models.BooleanField(default=False,
verbose_name=_l('Completed'),
help_text=_l("True if this task has finished."))
was_error = models.BooleanField(default=False,
verbose_name=_l('Was there an exception?'),
help_text=_l("True if this task threw an exception."))
_result = models.BinaryField(null=True, db_column='result')
_error = models.BinaryField(null=True, db_column='error')
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True)
completed_time = models.DateTimeField(null=True)
@property
def result(self):
return cPickle.loads(self._result)
@result.setter
def result(self, value):
self._result = cPickle.dumps(value)
@property
def error(self):
return cPickle.loads(self._error)
@error.setter
def error(self, value):
self._error = cPickle.dumps(str(type(value)) + ": " + str(value))
class Meta:
verbose_name = _l("Async")
app_label = 'locations'
def __unicode__(self):
return str(self.id)
Async: convert buffer to str before pickling
This fixes #358.
from __future__ import absolute_import
# stdlib, alphabetical
import cPickle
import logging
# Core Django, alphabetical
from django.db import models
from django.utils import six
from django.utils.translation import ugettext_lazy as _l
__all__ = ('Async',)
LOGGER = logging.getLogger(__name__)
class Async(models.Model):
""" Stores information about currently running asynchronous tasks. """
completed = models.BooleanField(default=False,
verbose_name=_l('Completed'),
help_text=_l("True if this task has finished."))
was_error = models.BooleanField(default=False,
verbose_name=_l('Was there an exception?'),
help_text=_l("True if this task threw an exception."))
_result = models.BinaryField(null=True, db_column='result')
_error = models.BinaryField(null=True, db_column='error')
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True)
completed_time = models.DateTimeField(null=True)
@property
def result(self):
result = self._result
if isinstance(result, six.memoryview):
result = str(result)
return cPickle.loads(result)
@result.setter
def result(self, value):
self._result = cPickle.dumps(value)
@property
def error(self):
return cPickle.loads(self._error)
@error.setter
def error(self, value):
self._error = cPickle.dumps(str(type(value)) + ": " + str(value))
class Meta:
verbose_name = _l("Async")
app_label = 'locations'
def __unicode__(self):
return str(self.id)
|
#!/usr/bin/env python
# Copyright 2017 Patrick Laughrea
# Licensed under the Apache License, Version 2.0
local_path = None
local_path_line = 4 # for local set and reset, 0-based index
online_path = 'https://raw.githubusercontent.com/github/gitignore/master/'
self_path = 'https://raw.githubusercontent.com/pat-laugh/gitignore-modifier/master/gitignore.py'
version = [1, 5, 0, 'dev', 6]
version_line = 8
import sys, os, re
from subprocess import call
py_v3 = sys.version_info[0] == 3
if py_v3:
import urllib.request
else:
import urllib2
name_gitignore = '.gitignore'
junk_lines = []
gitignores = {}
used_gitignores = []
class Option:
NONE = 0
UNKNOWN = 1
ADD = 2
CREATE = 3
REMOVE = 4
UPDATE = 5
CLEAR = 6
LOCAL = 7
LIST = 8
SELF_UPDATE = 9
options = {
'add': Option.ADD,
'create': Option.CREATE,
'remove': Option.REMOVE,
'update': Option.UPDATE,
'clear': Option.CLEAR,
'local': Option.LOCAL,
'list': Option.LIST,
'self-update': Option.SELF_UPDATE,
}
def print_options():
print(' add Adds templates to the .gitignore file')
print(' create Creates a new .gitignore file')
print(' remove Removes templates from the .gitignore file')
print(' update Updates each template in the .gitignore file')
print(' clear Removes all templates from the .gitignore file')
print(' ------------------------------------------------------------------------')
print(' local Local has the following suboptions:')
print_local_suboptions()
print(' ------------------------------------------------------------------------')
print(' list Prints a sorted list of all templates in the .gitignore file')
print(' self-update Updates this program')
def main(argc, argv):
check_modifiers(argv)
option = get_option(argc, argv)
if option == Option.NONE:
print('Error: no argument provided')
print('Options are:')
print_options()
sys.exit(1)
elif option == Option.UNKNOWN:
print('Error: unknown option "%s"' % argv[1])
print_similar_names(argv[1].lower(), options.keys())
sys.exit(1)
elif option == Option.LOCAL:
option_local(argc, argv)
sys.exit(0)
elif option == Option.SELF_UPDATE:
option_self_update(argc, argv)
sys.exit(0)
if local_path is not None:
set_names_local(local_path)
if not check_file_gitignore(option):
sys.exit('Error: no %s file found' % name_gitignore)
if option == Option.ADD:
option_add(argc, argv)
elif option == Option.CREATE:
if argc == 2:
sys.exit(0)
option_add(argc, argv)
elif option == Option.REMOVE:
option_remove(argc, argv)
elif option == Option.UPDATE:
option_update(argc, argv)
elif option == Option.CLEAR:
option_clear(argc, argv)
elif option == Option.LIST:
option_list(argc, argv)
sys.exit(0)
write_file(name_gitignore)
def check_modifiers(argv):
if ('-f' in argv):
modifier_file(argv, argv.index('-f'))
elif ('--file' in argv):
modifier_file(argv, argv.index('--file'))
def modifier_file(argv, index):
global name_gitignore
index_filename = index + 1;
if (index_filename >= len(argv)):
sys.exit('Error: a name must be provided for modifier -f or --file')
name_gitignore = argv[index_filename]
argv.pop(index_filename)
argv.pop(index)
def get_option(argc, argv):
if argc <= 1:
return Option.NONE
return options.get(argv[1].lower(), Option.UNKNOWN)
def check_file_gitignore(option):
if option == Option.CREATE:
create_file()
elif os.path.isfile(name_gitignore):
parse_file(name_gitignore)
elif option == Option.ADD:
create_file()
else:
return False
return True
def create_file():
open(name_gitignore, 'w')
print('%s created' % name_gitignore)
def exit_invalid_arguments(option_name):
sys.exit('Error: invalid arguments for "%s"' % option_name)
def option_add(argc, argv):
if argc < 3:
exit_invalid_arguments(argv[1])
for name in argv[2:]:
add(name)
def option_remove(argc, argv):
if argc < 3:
exit_invalid_arguments(argv[1])
for name in argv[2:]:
remove(name)
def get_re_gitignore(tag):
return re.compile(r'^\s*#+\s*gitignore-%s:([!-.0-~]+(/|\\))*([!-.0-~]+)\s*$' % tag)
re_start = get_re_gitignore('start')
def parse_file(filename):
errors = []
with open(filename, 'r') as f:
for line in f:
m = re_start.match(line)
if m is None:
junk_lines.append(line)
else:
name = m.group(3)
parse_gitignore(f, name)
if name.lower() not in names:
errors.append(name)
if len(errors) > 0:
print('\tInvalid %s file:' % name_gitignore)
for n in errors:
error_unknown_gitignore(n)
sys.exit(1)
if len(junk_lines) > 0:
last_line = junk_lines[-1]
if last_line[-1] != '\n':
junk_lines[-1] = last_line + os.linesep
re_end = get_re_gitignore('end')
def parse_gitignore(f, name):
gitignore_lines = []
for line in f:
m = re_end.match(line)
if m is None or m.group(3) != name:
gitignore_lines.append(line)
else:
gitignores.update({name.lower(): gitignore_lines})
return
print('\tInvalid %s file:' % name_gitignore)
sys.exit('Error: the start tag for "%s" is not matched by a corresponding end tag' % name)
def get_gitignore_tag(tag, name):
return '##gitignore-%s:%s\n' % (tag, name)
def write_file(filename):
f = open(filename, 'w')
f.writelines(junk_lines)
for name, lines in gitignores.items():
f.write(get_gitignore_tag('start', name))
f.writelines(lines)
f.write(get_gitignore_tag('end', name))
f.close()
def close_similarity(s1, s2):
if abs(len(s1) - len(s2)) > 2:
return False
set1, set2 = set(s1), set(s2)
len_set1, len_set2 = len(set1), len(set2)
if abs(len_set1 - len_set2) > 2:
return False
common_letters = set1.intersection(set2)
len_cl = len(common_letters)
return len_set1 - len_cl < 2 and len_set2 - len_cl < 2
def get_similar_names(name, list_names):
return [n for n in list_names if close_similarity(name, n)]
def print_similar_names(name, list_names):
similar_names = get_similar_names(name, list_names)
if len(similar_names) > 1:
print('Did you mean one of these?')
for n in similar_names: print('\t' + n)
elif len(similar_names) == 1:
print('Did you mean this?')
print('\t' + similar_names[0])
def error_unknown_gitignore(name):
print('Error: unknown gitignore "%s"' % name)
print_similar_names(name, names.keys())
def add(name):
lower = name.lower()
if lower in names:
update_gitignores(lower)
else:
error_unknown_gitignore(name)
def update_gitignores(name):
if name in used_gitignores:
return
used_gitignores.append(name)
updated = name in gitignores.keys()
gitignores.update({name: get_item_lines(name)})
print('%s %s' % (name, 'updated' if updated else 'added'))
def get_item_lines(name):
if local_path is not None:
url = local_path + names[name] + '.gitignore'
with open(url, 'r') as f:
lines = f.readlines()
else:
url = online_path + names[name] + '.gitignore'
if py_v3:
data = urllib.request.urlopen(url).readlines()
else:
data = urllib2.urlopen(url).readlines()
lines = [line.decode('utf-8') for line in data]
last_line = lines[-1]
if last_line[-1] != '\n':
lines[-1] = last_line + os.linesep
check_gitignore_links(lines, name, update_gitignores)
return lines
re_gitignore_link = re.compile(r'^(#|\s)*([!-.0-~]+/)*([!-.0-~]+)\.gitignore\s*$')
def check_gitignore_links(lines, linker, func):
for line in lines:
m = re_gitignore_link.match(line)
if m is None:
continue
name = m.group(3).lower()
if name not in used_gitignores:
print('%s -> %s' % (linker, name))
if name not in names:
error_unknown_gitignore(name)
continue
func(name)
def remove(name):
lower = name.lower()
if lower in used_gitignores:
return
used_gitignores.append(lower)
if lower not in names:
error_unknown_gitignore(name)
return
try:
check_gitignore_links(gitignores[lower], lower, remove)
gitignores.pop(lower)
print('%s removed' % name)
except KeyError:
print('Error: %s not in file' % name)
def option_update(argc, argv):
if argc != 2:
exit_invalid_arguments(argv[1])
for name in gitignores.keys():
update_gitignores(name)
def option_clear(argc, argv):
if argc != 2:
exit_invalid_arguments(argv[1])
gitignores.clear()
print('file cleared')
def option_list(argc, argv):
if argc != 2:
exit_invalid_arguments(argv[1])
l = list(gitignores.keys())
l.sort()
print(l)
class OptionLocal:
NONE = 0
UNKNOWN = 1
SET = 2
RESET = 3
SHOW = 4
CALL = 5
options_local = {
'set': OptionLocal.SET,
'reset': OptionLocal.RESET,
'show': OptionLocal.SHOW,
'call': OptionLocal.CALL,
}
def print_local_suboptions():
print(' set Sets a local directory to fetch gitignore templates from')
print(' reset Resets the local directory to None')
print(' show Shows the local path')
print(' call Calls a command in the local directory')
def option_local(argc, argv):
option = get_option_local(argc, argv)
if option == OptionLocal.NONE:
print('Error: no %s suboption provided' % argv[1])
print('Suboptions are:')
print_local_suboptions()
sys.exit(1)
elif option == OptionLocal.UNKNOWN:
print('Error: unknown %s suboption "%s"' % (argv[1], argv[2]))
print_similar_names(argv[2].lower(), options_local.keys())
sys.exit(1)
elif option == OptionLocal.SET:
option_local_set(argc, argv)
elif option == OptionLocal.RESET:
option_local_reset(argc, argv)
elif option == OptionLocal.SHOW:
option_local_show(argc, argv)
elif option == OptionLocal.CALL:
option_local_call(argc, argv)
def get_option_local(argc, argv):
if argc <= 2:
return OptionLocal.NONE
return options_local.get(argv[2].lower(), OptionLocal.UNKNOWN)
def option_local_set(argc, argv):
if argc != 4:
exit_invalid_arguments('%s %s' % (argv[1], argv[2]))
new_local_path = os.path.abspath(argv[3])
if new_local_path[-1] != os.sep:
new_local_path += os.sep
set_names_local(new_local_path)
set_local_path(new_local_path)
print('local path set to "%s"' % new_local_path)
def option_local_reset(argc, argv):
if argc != 3:
exit_invalid_arguments('%s %s' % (argv[1], argv[2]))
set_local_path(None)
print('local path reset')
def option_local_show(argc, argv):
if argc != 3:
exit_invalid_arguments('%s %s' % (argv[1], argv[2]))
if local_path is None:
print('local path is not set')
else:
print('local path set to "%s"' % local_path)
def option_local_call(argc, argv):
if local_path is None:
sys.exit('Error: local path is not set')
try:
curr_dir = os.getcwd()
os.chdir(local_path)
print('Output of the call with parameters %s:\n' % argv[3:])
print(call(argv[3:]))
os.chdir(curr_dir)
except (LookupError) as e:
sys.exit('Error: %s' % e)
except (TypeError, IOError, OSError) as e:
sys.exit('Error: local path or command is invalid')
def set_local_path(path):
with open(__file__, 'r') as f:
lines = f.readlines()
if path is None:
lines[local_path_line] = 'local_path = None\n'
else:
lines[local_path_line] = "local_path = '%s'\n" % path.replace('\\', '\\\\')
with open(__file__, 'w') as f:
f.writelines(lines)
def set_names_local(path):
try:
curr_dir = os.getcwd()
names.clear()
os.chdir(path)
add_names_local('.')
os.chdir(curr_dir)
except (LookupError) as e:
sys.exit('Error: %s' % e)
except (TypeError, IOError, OSError) as e:
sys.exit('Error: local path is invalid')
re_gitignore_file = re.compile(r'([^.]+)\.gitignore')
def check_local_file(subdir, path_name):
m = re_gitignore_file.match(path_name)
if m is not None:
name = m.group(1)
lower = m.group(1).lower()
if lower in names:
raise LookupError('conflicting "%s" templates in local directory' % lower)
if subdir == '.':
names.update({lower: name})
else: # skip the ./
names.update({lower: subdir[2:] + os.sep + name})
def add_names_local(subdir):
if py_v3:
for path in os.scandir(subdir):
path_name = path.name
if path.is_file():
check_local_file(subdir, path_name)
elif path.is_dir():
add_names_local(subdir + os.sep + path_name)
else:
for path_name in os.listdir(subdir):
path = os.path.join(subdir, path_name)
if os.path.isfile(path):
check_local_file(subdir, path_name)
elif os.path.isdir(path):
add_names_local(path)
su_def_stage = 'prod'
su_def_alpha = 0
re_version = re.compile(r".*\[\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*(,\s*'(dev|prod)'\s*(,\s*(\d+)\s*)?)?,?\s*\]")
def get_new_version(lines):
m = re_version.match(lines[version_line])
if m is None:
return None
major = int(m.group(1))
minor = int(m.group(2))
patch = int(m.group(3))
stage = m.group(5) or su_def_stage
alpha = su_def_alpha if m.group(7) is None else int(m.group(7))
return [major, minor, patch, stage, alpha]
su_up_to_date = 'already up to date'
su_successful = 'self-updated successfully'
def option_self_update(argc, argv):
if argc != 2:
exit_invalid_arguments(argv[1])
with open(__file__, 'r') as f:
old_lines = f.readlines()
if py_v3:
data = urllib.request.urlopen(self_path).readlines()
else:
data = urllib2.urlopen(self_path).readlines()
lines = [line.decode('utf-8') for line in data]
new_v = get_new_version(lines)
if new_v is None:
write_self_update(lines, local_path)
with open(__file__, 'r') as f:
new_lines = f.readlines()
if old_lines == new_lines:
print(su_up_to_date)
else:
print(su_successful)
else:
if len(version) < 5:
if len(version) < 4:
version.append(su_def_stage)
version.append(su_def_alpha)
if version == new_v:
print(su_up_to_date)
sys.exit()
if version[0] != new_v[0]:
self_update_warning('Warning: new version is incompatible with current version.')
if (version[3] != new_v[3] and new_v[3] != 'prod'):
self_update_warning('Warning: new version is not a production version.')
write_self_update(lines, local_path)
print(su_successful)
def write_self_update(lines, local_path):
with open(__file__, 'w') as f:
f.writelines(lines)
set_local_path(local_path)
def self_update_warning(warning):
while True:
sys.stdout.write(warning + ' Continue? (y/n) ')
sys.stdout.flush()
a = sys.stdin.readline().strip()[:1].lower()
if (a == 'y'):
break;
if (a == 'n'):
print('self-update cancelled')
sys.exit()
names = {'nanoc':'Nanoc','webmethods':'Global/WebMethods','commonlisp':'CommonLisp','xcode':'Global/Xcode','sublimetext':'Global/SublimeText','bricxcc':'Global/BricxCC','lemonstand':'LemonStand','concrete5':'Concrete5','go':'Go','jdeveloper':'Global/JDeveloper','ros':'ROS','zephir':'Zephir','kate':'Global/Kate','typo3':'Typo3','anjuta':'Global/Anjuta','cakephp':'CakePHP','textpattern':'Textpattern','elm':'Elm','modelsim':'Global/ModelSim','momentics':'Global/Momentics','fortran':'Fortran','gcov':'Gcov','ada':'Ada','libreoffice':'Global/LibreOffice','erlang':'Erlang','yeoman':'Yeoman','dm':'DM','playframework':'PlayFramework','python':'Python','monodevelop':'Global/MonoDevelop','dart':'Dart','craftcms':'CraftCMS','julia':'Julia','ninja':'Global/Ninja','vim':'Global/Vim','qt':'Qt','eiffelstudio':'Global/EiffelStudio','tortoisegit':'Global/TortoiseGit','d':'D','mercurial':'Global/Mercurial','c++':'C++','gradle':'Gradle','rhodesrhomobile':'RhodesRhomobile','xilinxise':'Global/XilinxISE','sketchup':'SketchUp','chefcookbook':'ChefCookbook','kohana':'Kohana','netbeans':'Global/NetBeans','packer':'Packer','elisp':'Elisp','tex':'TeX','sdcc':'Sdcc','turbogears2':'TurboGears2','virtualenv':'Global/VirtualEnv','scons':'SCons','scala':'Scala','delphi':'Delphi','unrealengine':'UnrealEngine','redis':'Global/Redis','jboss':'Jboss','lua':'Lua','zendframework':'ZendFramework','stata':'Global/Stata','visualstudio':'VisualStudio','eagle':'Eagle','appceleratortitanium':'AppceleratorTitanium','sbt':'Global/SBT','tags':'Global/Tags','opencart':'OpenCart','processing':'Processing','maven':'Maven','redcar':'Global/Redcar','elixir':'Elixir','bazaar':'Global/Bazaar','swift':'Swift','laravel':'Laravel','c':'C','drupal':'Drupal','synopsysvcs':'Global/SynopsysVCS','extjs':'ExtJs','ocaml':'OCaml','stella':'Stella','joomla':'Joomla','appengine':'AppEngine','waf':'Waf','clojure':'Clojure','lilypond':'Lilypond','symfony':'Symfony','yii':'Yii','sugarcrm':'SugarCRM','microsoftoffice':'Global/MicrosoftOffice','fancy':'Fancy','jenv':'Global/JEnv','terraform':'Terraform','haskell':'Haskell','cloud9':'Global/Cloud9','composer':'Composer','linux':'Global/Linux','fuelphp':'FuelPHP','archlinuxpackages':'ArchLinuxPackages','plone':'Plone','phalcon':'Phalcon','cfwheels':'CFWheels','mercury':'Mercury','java':'Java','codeigniter':'CodeIgniter','symphonycms':'SymphonyCMS','gpg':'Global/GPG','episerver':'EPiServer','slickedit':'Global/SlickEdit','rails':'Rails','perl':'Perl','emacs':'Global/Emacs','archives':'Global/Archives','dreamweaver':'Global/Dreamweaver','expressionengine':'ExpressionEngine','wordpress':'WordPress','scheme':'Scheme','matlab':'Global/Matlab','coq':'Coq','kdevelop4':'Global/KDevelop4','notepadpp':'Global/NotepadPP','macos':'Global/macOS','lazarus':'Global/Lazarus','ruby':'Ruby','jetbrains':'Global/JetBrains','igorpro':'IGORPro','eclipse':'Global/Eclipse','cvs':'Global/CVS','labview':'LabVIEW','r':'R','magento':'Magento','rust':'Rust','lyx':'Global/LyX','objective-c':'Objective-C','ansible':'Global/Ansible','oracleforms':'OracleForms','visualstudiocode':'Global/VisualStudioCode','xojo':'Xojo','smalltalk':'Smalltalk','metaprogrammingsystem':'MetaProgrammingSystem','umbraco':'Umbraco','cmake':'CMake','vvvv':'VVVV','kicad':'KiCad','scrivener':'Scrivener','gwt':'GWT','vagrant':'Global/Vagrant','leiningen':'Leiningen','prestashop':'Prestashop','unity':'Unity','otto':'Global/Otto','actionscript':'Actionscript','android':'Android','lithium':'Lithium','codekit':'Global/CodeKit','qooxdoo':'Qooxdoo','cuda':'CUDA','node':'Node','nim':'Nim','finale':'Finale','gitbook':'GitBook','flexbuilder':'Global/FlexBuilder','darteditor':'Global/DartEditor','dropbox':'Global/Dropbox','idris':'Idris','calabash':'Global/Calabash','agda':'Agda','svn':'Global/SVN','autotools':'Autotools','ensime':'Global/Ensime','sass':'Sass','jekyll':'Jekyll','windows':'Global/Windows','seamgen':'SeamGen','grails':'Grails','purescript':'PureScript','espresso':'Global/Espresso','textmate':'Global/TextMate','forcedotcom':'ForceDotCom','opa':'Opa'}
msg_error = 'Error: there was an error'
msg_error_url = 'maybe there is no Internet connection'
msg_error_permission = 'make sure you can write on the executable file'
if __name__ == '__main__':
if py_v3:
try:
main(len(sys.argv), sys.argv)
except urllib.error.URLError:
print(msg_error + ' -- ' + msg_error_url)
except PermissionError:
print('Error: permission denied -- ' + msg_error_permission)
else:
try:
main(len(sys.argv), sys.argv)
except urllib2.URLError:
print(msg_error + ' -- ' + msg_error_url)
except IOError:
print(msg_error + ' -- ' + msg_error_permission)
Improve messages
Resolves: #7
#!/usr/bin/env python
# Copyright 2017 Patrick Laughrea
# Licensed under the Apache License, Version 2.0
local_path = None
local_path_line = 4 # for local set and reset, 0-based index
online_path = 'https://raw.githubusercontent.com/github/gitignore/master/'
self_path = 'https://raw.githubusercontent.com/pat-laugh/gitignore-modifier/master/gitignore.py'
version = [1, 5, 0, 'dev', 7]
version_line = 8
import sys, os, re
from subprocess import call
py_v3 = sys.version_info[0] == 3
if py_v3:
import urllib.request
else:
import urllib2
name_gitignore = '.gitignore'
junk_lines = []
gitignores = {}
used_gitignores = []
class Option:
NONE = 0
UNKNOWN = 1
ADD = 2
CREATE = 3
REMOVE = 4
UPDATE = 5
CLEAR = 6
LOCAL = 7
LIST = 8
SELF_UPDATE = 9
options = {
'add': Option.ADD,
'create': Option.CREATE,
'remove': Option.REMOVE,
'update': Option.UPDATE,
'clear': Option.CLEAR,
'local': Option.LOCAL,
'list': Option.LIST,
'self-update': Option.SELF_UPDATE,
}
def print_options():
print(' add Adds templates to the .gitignore file')
print(' create Creates a new .gitignore file')
print(' remove Removes templates from the .gitignore file')
print(' update Updates each template in the .gitignore file')
print(' clear Removes all templates from the .gitignore file')
print(' ------------------------------------------------------------------------')
print(' local Local has the following suboptions:')
print_local_suboptions()
print(' ------------------------------------------------------------------------')
print(' list Prints a sorted list of all templates in the .gitignore file')
print(' self-update Updates this program')
def main(argc, argv):
check_modifiers(argv)
option = get_option(argc, argv)
if option == Option.NONE:
print('Error: no argument provided.')
print('Options are:')
print_options()
sys.exit(1)
elif option == Option.UNKNOWN:
print('Error: unknown option "%s".' % argv[1])
print_similar_names(argv[1].lower(), options.keys())
sys.exit(1)
elif option == Option.LOCAL:
option_local(argc, argv)
sys.exit(0)
elif option == Option.SELF_UPDATE:
option_self_update(argc, argv)
sys.exit(0)
if local_path is not None:
set_names_local(local_path)
if not check_file_gitignore(option):
sys.exit('Error: no %s file found.' % name_gitignore)
if option == Option.ADD:
option_add(argc, argv)
elif option == Option.CREATE:
if argc == 2:
sys.exit(0)
option_add(argc, argv)
elif option == Option.REMOVE:
option_remove(argc, argv)
elif option == Option.UPDATE:
option_update(argc, argv)
elif option == Option.CLEAR:
option_clear(argc, argv)
elif option == Option.LIST:
option_list(argc, argv)
sys.exit(0)
write_file(name_gitignore)
def check_modifiers(argv):
if ('-f' in argv):
modifier_file(argv, argv.index('-f'))
elif ('--file' in argv):
modifier_file(argv, argv.index('--file'))
def modifier_file(argv, index):
global name_gitignore
index_filename = index + 1;
if (index_filename >= len(argv)):
sys.exit('Error: a filename must be provided for modifier -f or --file.')
name_gitignore = argv[index_filename]
argv.pop(index_filename)
argv.pop(index)
def get_option(argc, argv):
if argc <= 1:
return Option.NONE
return options.get(argv[1].lower(), Option.UNKNOWN)
def check_file_gitignore(option):
if option == Option.CREATE:
create_file()
elif os.path.isfile(name_gitignore):
parse_file(name_gitignore)
elif option == Option.ADD:
create_file()
else:
return False
return True
def create_file():
open(name_gitignore, 'w')
print('%s created' % name_gitignore)
def exit_invalid_arguments(option_name):
sys.exit('Error: invalid arguments for "%s".' % option_name)
def option_add(argc, argv):
if argc < 3:
exit_invalid_arguments(argv[1])
for name in argv[2:]:
add(name)
def option_remove(argc, argv):
if argc < 3:
exit_invalid_arguments(argv[1])
for name in argv[2:]:
remove(name)
def get_re_gitignore(tag):
return re.compile(r'^\s*#+\s*gitignore-%s:([!-.0-~]+(/|\\))*([!-.0-~]+)\s*$' % tag)
re_start = get_re_gitignore('start')
def parse_file(filename):
errors = []
with open(filename, 'r') as f:
for line in f:
m = re_start.match(line)
if m is None:
junk_lines.append(line)
else:
name = m.group(3)
parse_gitignore(f, name)
if name.lower() not in names:
errors.append(name)
if len(errors) > 0:
if len(errors) == 1:
print('There was an error while parsing the %s file.' % name_gitignore)
error_unknown_gitignore(errors[0])
sys.exit(1)
print('There were multiple errors while parsing the %s file.' % name_gitignore)
for n in errors:
error_unknown_gitignore(n)
sys.exit(1)
if len(junk_lines) > 0:
last_line = junk_lines[-1]
if last_line[-1] != '\n':
junk_lines[-1] = last_line + os.linesep
re_end = get_re_gitignore('end')
def parse_gitignore(f, name):
gitignore_lines = []
for line in f:
m = re_end.match(line)
if m is None or m.group(3) != name:
gitignore_lines.append(line)
else:
gitignores.update({name.lower(): gitignore_lines})
return
print('There was an error while parsing the %s file.' % name_gitignore)
sys.exit('Error: the start tag for "%s" is not matched by a corresponding end tag.' % name)
def get_gitignore_tag(tag, name):
return '##gitignore-%s:%s\n' % (tag, name)
def write_file(filename):
f = open(filename, 'w')
f.writelines(junk_lines)
for name, lines in gitignores.items():
f.write(get_gitignore_tag('start', name))
f.writelines(lines)
f.write(get_gitignore_tag('end', name))
f.close()
def close_similarity(s1, s2):
if abs(len(s1) - len(s2)) > 2:
return False
set1, set2 = set(s1), set(s2)
len_set1, len_set2 = len(set1), len(set2)
if abs(len_set1 - len_set2) > 2:
return False
common_letters = set1.intersection(set2)
len_cl = len(common_letters)
return len_set1 - len_cl < 2 and len_set2 - len_cl < 2
def get_similar_names(name, list_names):
return [n for n in list_names if close_similarity(name, n)]
def print_similar_names(name, list_names):
similar_names = get_similar_names(name, list_names)
if len(similar_names) > 1:
print('Did you mean one of these?')
for n in similar_names: print('\t' + n)
elif len(similar_names) == 1:
print('Did you mean this?')
print('\t' + similar_names[0])
def error_unknown_gitignore(name):
print('Error: unknown gitignore "%s"' % name)
print_similar_names(name, names.keys())
def add(name):
lower = name.lower()
if lower in names:
update_gitignores(lower)
else:
error_unknown_gitignore(name)
def update_gitignores(name):
if name in used_gitignores:
return
used_gitignores.append(name)
updated = name in gitignores.keys()
gitignores.update({name: get_item_lines(name)})
print('%s %s' % (name, 'updated' if updated else 'added'))
def get_item_lines(name):
if local_path is not None:
url = local_path + names[name] + '.gitignore'
with open(url, 'r') as f:
lines = f.readlines()
else:
url = online_path + names[name] + '.gitignore'
if py_v3:
data = urllib.request.urlopen(url).readlines()
else:
data = urllib2.urlopen(url).readlines()
lines = [line.decode('utf-8') for line in data]
last_line = lines[-1]
if last_line[-1] != '\n':
lines[-1] = last_line + os.linesep
check_gitignore_links(lines, name, update_gitignores)
return lines
re_gitignore_link = re.compile(r'^(#|\s)*([!-.0-~]+/)*([!-.0-~]+)\.gitignore\s*$')
def check_gitignore_links(lines, linker, func):
for line in lines:
m = re_gitignore_link.match(line)
if m is None:
continue
name = m.group(3).lower()
if name not in used_gitignores:
print('%s -> %s' % (linker, name))
if name not in names:
error_unknown_gitignore(name)
continue
func(name)
def remove(name):
lower = name.lower()
if lower in used_gitignores:
return
used_gitignores.append(lower)
if lower not in names:
error_unknown_gitignore(name)
return
try:
check_gitignore_links(gitignores[lower], lower, remove)
gitignores.pop(lower)
print('%s removed' % name)
except KeyError:
print('Error: %s not in file.' % name)
def option_update(argc, argv):
if argc != 2:
exit_invalid_arguments(argv[1])
for name in gitignores.keys():
update_gitignores(name)
def option_clear(argc, argv):
if argc != 2:
exit_invalid_arguments(argv[1])
gitignores.clear()
print('File cleared.')
def option_list(argc, argv):
if argc != 2:
exit_invalid_arguments(argv[1])
l = list(gitignores.keys())
l.sort()
print(l)
class OptionLocal:
NONE = 0
UNKNOWN = 1
SET = 2
RESET = 3
SHOW = 4
CALL = 5
options_local = {
'set': OptionLocal.SET,
'reset': OptionLocal.RESET,
'show': OptionLocal.SHOW,
'call': OptionLocal.CALL,
}
def print_local_suboptions():
print(' set Sets a local directory to fetch gitignore templates from')
print(' reset Resets the local directory to None')
print(' show Shows the local path')
print(' call Calls a command in the local directory')
def option_local(argc, argv):
option = get_option_local(argc, argv)
if option == OptionLocal.NONE:
print('Error: no %s suboption provided.' % argv[1])
print('Suboptions are:')
print_local_suboptions()
sys.exit(1)
elif option == OptionLocal.UNKNOWN:
print('Error: unknown %s suboption "%s".' % (argv[1], argv[2]))
print_similar_names(argv[2].lower(), options_local.keys())
sys.exit(1)
elif option == OptionLocal.SET:
option_local_set(argc, argv)
elif option == OptionLocal.RESET:
option_local_reset(argc, argv)
elif option == OptionLocal.SHOW:
option_local_show(argc, argv)
elif option == OptionLocal.CALL:
option_local_call(argc, argv)
def get_option_local(argc, argv):
if argc <= 2:
return OptionLocal.NONE
return options_local.get(argv[2].lower(), OptionLocal.UNKNOWN)
def option_local_set(argc, argv):
if argc != 4:
exit_invalid_arguments('%s %s' % (argv[1], argv[2]))
new_local_path = os.path.abspath(argv[3])
if new_local_path[-1] != os.sep:
new_local_path += os.sep
set_names_local(new_local_path)
set_local_path(new_local_path)
print('Local path set to "%s".' % new_local_path)
def option_local_reset(argc, argv):
if argc != 3:
exit_invalid_arguments('%s %s' % (argv[1], argv[2]))
set_local_path(None)
print('Local path reset.')
def option_local_show(argc, argv):
if argc != 3:
exit_invalid_arguments('%s %s' % (argv[1], argv[2]))
if local_path is None:
print('Local path is not set.')
else:
print(local_path)
def option_local_call(argc, argv):
if local_path is None:
sys.exit('Error: local path is not set.')
try:
curr_dir = os.getcwd()
os.chdir(local_path)
print('Output of the call with parameters %s:\n' % argv[3:])
print(call(argv[3:]))
os.chdir(curr_dir)
except (LookupError) as e:
sys.exit('Error: %s.' % e)
except (TypeError, IOError, OSError) as e:
sys.exit('Error: local path or command is invalid.')
def set_local_path(path):
with open(__file__, 'r') as f:
lines = f.readlines()
if path is None:
lines[local_path_line] = 'local_path = None\n'
else:
lines[local_path_line] = "local_path = '%s'\n" % path.replace('\\', '\\\\')
with open(__file__, 'w') as f:
f.writelines(lines)
def set_names_local(path):
try:
curr_dir = os.getcwd()
names.clear()
os.chdir(path)
add_names_local('.')
os.chdir(curr_dir)
except (LookupError) as e:
sys.exit('Error: %s.' % e)
except (TypeError, IOError, OSError) as e:
sys.exit('Error: local path is invalid.')
re_gitignore_file = re.compile(r'([^.]+)\.gitignore')
def check_local_file(subdir, path_name):
m = re_gitignore_file.match(path_name)
if m is not None:
name = m.group(1)
lower = m.group(1).lower()
if lower in names:
raise LookupError('conflicting "%s" templates in local directory' % lower)
if subdir == '.':
names.update({lower: name})
else: # skip the ./
names.update({lower: subdir[2:] + os.sep + name})
def add_names_local(subdir):
if py_v3:
for path in os.scandir(subdir):
path_name = path.name
if path.is_file():
check_local_file(subdir, path_name)
elif path.is_dir():
add_names_local(subdir + os.sep + path_name)
else:
for path_name in os.listdir(subdir):
path = os.path.join(subdir, path_name)
if os.path.isfile(path):
check_local_file(subdir, path_name)
elif os.path.isdir(path):
add_names_local(path)
su_def_stage = 'prod'
su_def_alpha = 0
re_version = re.compile(r".*\[\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*(,\s*'(dev|prod)'\s*(,\s*(\d+)\s*)?)?,?\s*\]")
def get_new_version(lines):
m = re_version.match(lines[version_line])
if m is None:
return None
major = int(m.group(1))
minor = int(m.group(2))
patch = int(m.group(3))
stage = m.group(5) or su_def_stage
alpha = su_def_alpha if m.group(7) is None else int(m.group(7))
return [major, minor, patch, stage, alpha]
su_up_to_date = 'Already up to date.'
su_successful = 'Self-updated successfully.'
def option_self_update(argc, argv):
if argc != 2:
exit_invalid_arguments(argv[1])
with open(__file__, 'r') as f:
old_lines = f.readlines()
if py_v3:
data = urllib.request.urlopen(self_path).readlines()
else:
data = urllib2.urlopen(self_path).readlines()
lines = [line.decode('utf-8') for line in data]
new_v = get_new_version(lines)
if new_v is None:
write_self_update(lines, local_path)
with open(__file__, 'r') as f:
new_lines = f.readlines()
if old_lines == new_lines:
print(su_up_to_date)
else:
print(su_successful)
else:
if len(version) < 5:
if len(version) < 4:
version.append(su_def_stage)
version.append(su_def_alpha)
if version == new_v:
print(su_up_to_date)
sys.exit()
if version[0] != new_v[0]:
self_update_warning('Warning: new version is incompatible with current version.')
if (version[3] != new_v[3] and new_v[3] != 'prod'):
self_update_warning('Warning: new version is not a production version.')
write_self_update(lines, local_path)
print(su_successful)
def write_self_update(lines, local_path):
with open(__file__, 'w') as f:
f.writelines(lines)
set_local_path(local_path)
def self_update_warning(warning):
while True:
sys.stdout.write(warning + ' Continue? (y/n) ')
sys.stdout.flush()
a = sys.stdin.readline().strip()[:1].lower()
if (a == 'y'):
break;
if (a == 'n'):
print('Self-update cancelled.')
sys.exit()
names = {'nanoc':'Nanoc','webmethods':'Global/WebMethods','commonlisp':'CommonLisp','xcode':'Global/Xcode','sublimetext':'Global/SublimeText','bricxcc':'Global/BricxCC','lemonstand':'LemonStand','concrete5':'Concrete5','go':'Go','jdeveloper':'Global/JDeveloper','ros':'ROS','zephir':'Zephir','kate':'Global/Kate','typo3':'Typo3','anjuta':'Global/Anjuta','cakephp':'CakePHP','textpattern':'Textpattern','elm':'Elm','modelsim':'Global/ModelSim','momentics':'Global/Momentics','fortran':'Fortran','gcov':'Gcov','ada':'Ada','libreoffice':'Global/LibreOffice','erlang':'Erlang','yeoman':'Yeoman','dm':'DM','playframework':'PlayFramework','python':'Python','monodevelop':'Global/MonoDevelop','dart':'Dart','craftcms':'CraftCMS','julia':'Julia','ninja':'Global/Ninja','vim':'Global/Vim','qt':'Qt','eiffelstudio':'Global/EiffelStudio','tortoisegit':'Global/TortoiseGit','d':'D','mercurial':'Global/Mercurial','c++':'C++','gradle':'Gradle','rhodesrhomobile':'RhodesRhomobile','xilinxise':'Global/XilinxISE','sketchup':'SketchUp','chefcookbook':'ChefCookbook','kohana':'Kohana','netbeans':'Global/NetBeans','packer':'Packer','elisp':'Elisp','tex':'TeX','sdcc':'Sdcc','turbogears2':'TurboGears2','virtualenv':'Global/VirtualEnv','scons':'SCons','scala':'Scala','delphi':'Delphi','unrealengine':'UnrealEngine','redis':'Global/Redis','jboss':'Jboss','lua':'Lua','zendframework':'ZendFramework','stata':'Global/Stata','visualstudio':'VisualStudio','eagle':'Eagle','appceleratortitanium':'AppceleratorTitanium','sbt':'Global/SBT','tags':'Global/Tags','opencart':'OpenCart','processing':'Processing','maven':'Maven','redcar':'Global/Redcar','elixir':'Elixir','bazaar':'Global/Bazaar','swift':'Swift','laravel':'Laravel','c':'C','drupal':'Drupal','synopsysvcs':'Global/SynopsysVCS','extjs':'ExtJs','ocaml':'OCaml','stella':'Stella','joomla':'Joomla','appengine':'AppEngine','waf':'Waf','clojure':'Clojure','lilypond':'Lilypond','symfony':'Symfony','yii':'Yii','sugarcrm':'SugarCRM','microsoftoffice':'Global/MicrosoftOffice','fancy':'Fancy','jenv':'Global/JEnv','terraform':'Terraform','haskell':'Haskell','cloud9':'Global/Cloud9','composer':'Composer','linux':'Global/Linux','fuelphp':'FuelPHP','archlinuxpackages':'ArchLinuxPackages','plone':'Plone','phalcon':'Phalcon','cfwheels':'CFWheels','mercury':'Mercury','java':'Java','codeigniter':'CodeIgniter','symphonycms':'SymphonyCMS','gpg':'Global/GPG','episerver':'EPiServer','slickedit':'Global/SlickEdit','rails':'Rails','perl':'Perl','emacs':'Global/Emacs','archives':'Global/Archives','dreamweaver':'Global/Dreamweaver','expressionengine':'ExpressionEngine','wordpress':'WordPress','scheme':'Scheme','matlab':'Global/Matlab','coq':'Coq','kdevelop4':'Global/KDevelop4','notepadpp':'Global/NotepadPP','macos':'Global/macOS','lazarus':'Global/Lazarus','ruby':'Ruby','jetbrains':'Global/JetBrains','igorpro':'IGORPro','eclipse':'Global/Eclipse','cvs':'Global/CVS','labview':'LabVIEW','r':'R','magento':'Magento','rust':'Rust','lyx':'Global/LyX','objective-c':'Objective-C','ansible':'Global/Ansible','oracleforms':'OracleForms','visualstudiocode':'Global/VisualStudioCode','xojo':'Xojo','smalltalk':'Smalltalk','metaprogrammingsystem':'MetaProgrammingSystem','umbraco':'Umbraco','cmake':'CMake','vvvv':'VVVV','kicad':'KiCad','scrivener':'Scrivener','gwt':'GWT','vagrant':'Global/Vagrant','leiningen':'Leiningen','prestashop':'Prestashop','unity':'Unity','otto':'Global/Otto','actionscript':'Actionscript','android':'Android','lithium':'Lithium','codekit':'Global/CodeKit','qooxdoo':'Qooxdoo','cuda':'CUDA','node':'Node','nim':'Nim','finale':'Finale','gitbook':'GitBook','flexbuilder':'Global/FlexBuilder','darteditor':'Global/DartEditor','dropbox':'Global/Dropbox','idris':'Idris','calabash':'Global/Calabash','agda':'Agda','svn':'Global/SVN','autotools':'Autotools','ensime':'Global/Ensime','sass':'Sass','jekyll':'Jekyll','windows':'Global/Windows','seamgen':'SeamGen','grails':'Grails','purescript':'PureScript','espresso':'Global/Espresso','textmate':'Global/TextMate','forcedotcom':'ForceDotCom','opa':'Opa'}
msg_error = 'Error: there was an error'
msg_error_url = 'maybe there is no Internet connection'
msg_error_permission = 'make sure you can write on the executable file'
if __name__ == '__main__':
if py_v3:
try:
main(len(sys.argv), sys.argv)
except urllib.error.URLError:
print('%s -- %s.' % (msg_error, msg_error_url))
except PermissionError:
print('Error: permission denied -- %s.' % msg_error_permission)
else:
try:
main(len(sys.argv), sys.argv)
except urllib2.URLError:
print('%s -- %s.' % (msg_error, msg_error_url))
except IOError:
print('%s -- %s.' % (msg_error, msg_error_permission)) |
"""Run all doctests from modules on the command line. Use -v for verbose.
Example usages:
python doctests.py *.py
python doctests.py -v *.py
You can add more module-level tests with
__doc__ += "..."
You can add stochastic tests with
__doc__ += random_tests("...")
"""
if __name__ == "__main__":
import sys, glob, doctest
args = [arg for arg in sys.argv[1:] if arg != '-v']
if not args: args = ['*.py']
modules = [__import__(name.replace('.py',''))
for arg in args for name in glob.glob(arg)]
for module in modules:
doctest.testmod(module, report=1, optionflags=doctest.REPORT_UDIFF)
summary = doctest.master.summarize() if modules else (0, 0)
print '%d failed out of %d' % summary
Delete doctests.py -- use py.test from now on.
|
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr import i_manager
from jormungandr.interfaces.v1.ResourceUri import ResourceUri
from jormungandr.interfaces.parsers import default_count_arg_type, DateTimeFormat, depth_argument
from jormungandr.interfaces.v1.decorators import get_obj_serializer
from jormungandr.interfaces.v1.errors import ManageError
from jormungandr.interfaces.v1.fields import fields, enum_type, NonNullList,\
NonNullNested, NonNullProtobufNested, PbField, error, pagination, NonNullString, \
feed_publisher, disruption_marshaller
from jormungandr.interfaces.v1.serializer import api
from datetime import datetime
import six
week_pattern = {
"monday": fields.Boolean(),
"tuesday": fields.Boolean(),
"wednesday": fields.Boolean(),
"thursday": fields.Boolean(),
"friday": fields.Boolean(),
"saturday": fields.Boolean(),
"sunday": fields.Boolean(),
}
calendar_period = {
"begin": fields.String(),
"end": fields.String(),
}
calendar_exception = {
"datetime": fields.String(attribute="date"),
"type": enum_type(),
}
validity_pattern = {
'beginning_date': fields.String(),
'days': fields.String(),
}
calendar = {
"id": NonNullString(attribute="uri"),
"name": NonNullString(),
"week_pattern": NonNullNested(week_pattern),
"active_periods": NonNullList(NonNullNested(calendar_period)),
"exceptions": NonNullList(NonNullNested(calendar_exception)),
"validity_pattern": NonNullProtobufNested(validity_pattern)
}
calendars = {
"calendars": NonNullList(NonNullNested(calendar)),
"error": PbField(error, attribute='error'),
"pagination": NonNullNested(pagination),
"disruptions": fields.List(NonNullNested(disruption_marshaller), attribute="impacts"),
"feed_publishers": fields.List(NonNullNested(feed_publisher))
}
class Calendars(ResourceUri):
def __init__(self):
ResourceUri.__init__(self, output_type_serializer=api.CalendarsSerializer)
parser_get = self.parsers["get"]
parser_get.add_argument("depth", type=depth_argument, default=1,
help="The depth of your object")
parser_get.add_argument("count", type=default_count_arg_type, default=10,
help="Number of calendars per page")
parser_get.add_argument("start_page", type=int, default=0,
help="The current page")
parser_get.add_argument("start_date", type=six.text_type, default="",
help="Start date")
parser_get.add_argument("end_date", type=six.text_type, default="",
help="End date")
parser_get.add_argument("forbidden_id[]", type=six.text_type, deprecated=True,
help="DEPRECATED, replaced by `forbidden_uris[]`",
dest="__temporary_forbidden_id[]",
default=[],
action='append',
schema_metadata={'format': 'pt-object'})
parser_get.add_argument("forbidden_uris[]", type=six.text_type,
help="forbidden uris",
dest="forbidden_uris[]",
default=[],
action="append",
schema_metadata={'format': 'pt-object'})
parser_get.add_argument("distance", type=int, default=200,
help="Distance range of the query. Used only if a coord is in the query")
parser_get.add_argument("_current_datetime", type=DateTimeFormat(),
schema_metadata={'default': 'now'}, hidden=True,
default=datetime.utcnow(),
help='The datetime considered as "now". Used for debug, default is '
'the moment of the request. It will mainly change the output '
'of the disruptions.')
self.collection = 'calendars'
self.collections = calendars
self.get_decorators.insert(0, ManageError())
self.get_decorators.insert(1, get_obj_serializer(self))
def options(self, **kwargs):
return self.api_description(**kwargs)
def get(self, region=None, lon=None, lat=None, uri=None, id=None):
self.region = i_manager.get_region(region, lon, lat)
args = self.parsers["get"].parse_args()
# for retrocompatibility purpose
for forbid_id in args['__temporary_forbidden_id[]']:
args['forbidden_uris[]'].append(forbid_id)
uris = []
if id:
args["filter"] = "calendar.uri=" + id
elif uri:
# Calendars of line
if uri[-1] == "/":
uri = uri[:-1]
uris = uri.split("/")
args["filter"] = self.get_filter(uris, args)
self._register_interpreted_parameters(args)
response = i_manager.dispatch(args, "calendars",
instance_name=self.region)
return response
jormungandr: rollback for get_filter in calendars
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr import i_manager
from jormungandr.interfaces.v1.ResourceUri import ResourceUri
from jormungandr.interfaces.parsers import default_count_arg_type, DateTimeFormat, depth_argument
from jormungandr.interfaces.v1.decorators import get_obj_serializer
from jormungandr.interfaces.v1.errors import ManageError
from jormungandr.interfaces.v1.fields import fields, enum_type, NonNullList,\
NonNullNested, NonNullProtobufNested, PbField, error, pagination, NonNullString, \
feed_publisher, disruption_marshaller
from jormungandr.interfaces.v1.serializer import api
from datetime import datetime
import six
week_pattern = {
"monday": fields.Boolean(),
"tuesday": fields.Boolean(),
"wednesday": fields.Boolean(),
"thursday": fields.Boolean(),
"friday": fields.Boolean(),
"saturday": fields.Boolean(),
"sunday": fields.Boolean(),
}
calendar_period = {
"begin": fields.String(),
"end": fields.String(),
}
calendar_exception = {
"datetime": fields.String(attribute="date"),
"type": enum_type(),
}
validity_pattern = {
'beginning_date': fields.String(),
'days': fields.String(),
}
calendar = {
"id": NonNullString(attribute="uri"),
"name": NonNullString(),
"week_pattern": NonNullNested(week_pattern),
"active_periods": NonNullList(NonNullNested(calendar_period)),
"exceptions": NonNullList(NonNullNested(calendar_exception)),
"validity_pattern": NonNullProtobufNested(validity_pattern)
}
calendars = {
"calendars": NonNullList(NonNullNested(calendar)),
"error": PbField(error, attribute='error'),
"pagination": NonNullNested(pagination),
"disruptions": fields.List(NonNullNested(disruption_marshaller), attribute="impacts"),
"feed_publishers": fields.List(NonNullNested(feed_publisher))
}
class Calendars(ResourceUri):
def __init__(self):
ResourceUri.__init__(self, output_type_serializer=api.CalendarsSerializer)
parser_get = self.parsers["get"]
parser_get.add_argument("depth", type=depth_argument, default=1,
help="The depth of your object")
parser_get.add_argument("count", type=default_count_arg_type, default=10,
help="Number of calendars per page")
parser_get.add_argument("start_page", type=int, default=0,
help="The current page")
parser_get.add_argument("start_date", type=six.text_type, default="",
help="Start date")
parser_get.add_argument("end_date", type=six.text_type, default="",
help="End date")
parser_get.add_argument("forbidden_id[]", type=six.text_type, deprecated=True,
help="DEPRECATED, replaced by `forbidden_uris[]`",
dest="__temporary_forbidden_id[]",
default=[],
action='append',
schema_metadata={'format': 'pt-object'})
parser_get.add_argument("forbidden_uris[]", type=six.text_type,
help="forbidden uris",
dest="forbidden_uris[]",
default=[],
action="append",
schema_metadata={'format': 'pt-object'})
parser_get.add_argument("distance", type=int, default=200,
help="Distance range of the query. Used only if a coord is in the query")
parser_get.add_argument("_current_datetime", type=DateTimeFormat(),
schema_metadata={'default': 'now'}, hidden=True,
default=datetime.utcnow(),
help='The datetime considered as "now". Used for debug, default is '
'the moment of the request. It will mainly change the output '
'of the disruptions.')
self.collection = 'calendars'
self.collections = calendars
self.get_decorators.insert(0, ManageError())
self.get_decorators.insert(1, get_obj_serializer(self))
def options(self, **kwargs):
return self.api_description(**kwargs)
def get(self, region=None, lon=None, lat=None, uri=None, id=None):
self.region = i_manager.get_region(region, lon, lat)
args = self.parsers["get"].parse_args()
# for retrocompatibility purpose
for forbid_id in args['__temporary_forbidden_id[]']:
args['forbidden_uris[]'].append(forbid_id)
if id:
args["filter"] = "calendar.uri=" + id
elif uri:
# Calendars of line
if uri[-1] == "/":
uri = uri[:-1]
uris = uri.split("/")
args["filter"] = self.get_filter(uris, args)
else:
args["filter"] = ""
self._register_interpreted_parameters(args)
response = i_manager.dispatch(args, "calendars",
instance_name=self.region)
return response
|
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from flask.ext.restful import fields, marshal_with, reqparse
from jormungandr import i_manager
from jormungandr import timezone
from fields import stop_point, route, pagination, PbField, stop_date_time, \
additional_informations, stop_time_properties_links, display_informations_vj, \
display_informations_route, additional_informations_vj, UrisToLinks, error, \
enum_type, SplitDateTime, GeoJson
from ResourceUri import ResourceUri, complete_links
from datetime import datetime
from jormungandr.interfaces.argument import ArgumentDoc
from jormungandr.interfaces.parsers import option_value, date_time_format
from errors import ManageError
from flask.ext.restful.types import natural, boolean
class Schedules(ResourceUri):
parsers = {}
def __init__(self, endpoint):
super(Schedules, self).__init__()
self.endpoint = endpoint
self.parsers["get"] = reqparse.RequestParser(
argument_class=ArgumentDoc)
parser_get = self.parsers["get"]
parser_get.add_argument("filter", type=str)
parser_get.add_argument("from_datetime", type=date_time_format,
description="The datetime from which you want\
the schedules")
parser_get.add_argument("duration", type=int, default=3600 * 24,
description="Maximum duration between datetime\
and the retrieved stop time")
parser_get.add_argument("depth", type=int, default=2)
parser_get.add_argument("count", type=int, default=10,
description="Number of schedules per page")
parser_get.add_argument("start_page", type=int, default=0,
description="The current page")
parser_get.add_argument("max_date_times", type=natural,
description="Maximum number of schedule per\
stop_point/route")
parser_get.add_argument("forbidden_id[]", type=unicode,
description="forbidden ids",
dest="forbidden_uris[]",
action="append")
parser_get.add_argument("calendar", type=str,
description="Id of the calendar")
parser_get.add_argument("show_codes", type=boolean, default=False,
description="show more identification codes")
self.method_decorators.append(complete_links(self))
def get(self, uri=None, region=None, lon=None, lat=None):
args = self.parsers["get"].parse_args()
args["nb_stoptimes"] = args["count"]
args["interface_version"] = 1
if uri is None:
first_filter = args["filter"].lower().split("and")[0].strip()
parts = first_filter.lower().split("=")
if len(parts) != 2:
error = "Unable to parse filter {filter}"
return {"error": error.format(filter=args["filter"])}, 503
else:
self.region = i_manager.key_of_id(parts[1].strip())
else:
self.collection = 'schedules'
args["filter"] = self.get_filter(uri.split("/"))
self.region = i_manager.get_region(region, lon, lat)
#@TODO: Change to timestamp
if not args["from_datetime"]:
args["from_datetime"] = datetime.now().strftime("%Y%m%dT1337")
else:
args["from_datetime"] = args["from_datetime"].strftime("%Y%m%dT%H%M%S")
timezone.set_request_timezone(self.region)
return i_manager.dispatch(args, self.endpoint,
instance_name=self.region)
date_time = {
"date_time": SplitDateTime(date='date', time='time'),
"additional_informations": additional_informations(),
"links": stop_time_properties_links()
}
row = {
"stop_point": PbField(stop_point),
"date_times": fields.List(fields.Nested(date_time))
}
header = {
"display_informations": PbField(display_informations_vj,
attribute='pt_display_informations'),
"additional_informations": additional_informations_vj(),
"links": UrisToLinks()
}
table_field = {
"rows": fields.List(fields.Nested(row)),
"headers": fields.List(fields.Nested(header))
}
route_schedule_fields = {
"table": PbField(table_field),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations'),
"geojson" : GeoJson()
}
route_schedules = {
"error": PbField(error, attribute='error'),
"route_schedules": fields.List(fields.Nested(route_schedule_fields)),
"pagination": fields.Nested(pagination)
}
class RouteSchedules(Schedules):
def __init__(self):
super(RouteSchedules, self).__init__("route_schedules")
@marshal_with(route_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(RouteSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
stop_schedule = {
"stop_point": PbField(stop_point),
"route": PbField(route, attribute="route"),
"additional_informations": enum_type(attribute="response_status"),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations'),
"date_times": fields.List(fields.Nested(date_time)),
"links": UrisToLinks()
}
stop_schedules = {
"stop_schedules": fields.List(fields.Nested(stop_schedule)),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
class StopSchedules(Schedules):
def __init__(self):
super(StopSchedules, self).__init__("departure_boards")
self.parsers["get"].add_argument("interface_version", type=int,
default=1, hidden=True)
@marshal_with(stop_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(StopSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
passage = {
"route": PbField(route, attribute="vehicle_journey.route"),
"stop_point": PbField(stop_point),
"stop_date_time": PbField(stop_date_time)
}
departures = {
"departures": fields.List(fields.Nested(passage),
attribute="next_departures"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
arrivals = {
"arrivals": fields.List(fields.Nested(passage), attribute="next_arrivals"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
class NextDepartures(Schedules):
def __init__(self):
super(NextDepartures, self).__init__("next_departures")
@marshal_with(departures)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None,
dest="nb_stoptimes"):
return super(NextDepartures, self).get(uri=uri, region=region, lon=lon,
lat=lat)
class NextArrivals(Schedules):
def __init__(self):
super(NextArrivals, self).__init__("next_arrivals")
@marshal_with(arrivals)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(NextArrivals, self).get(uri=uri, region=region, lon=lon,
lat=lat)
Jormungandr: hotfix for stopschedule, we set a limit at 10000 results
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from flask.ext.restful import fields, marshal_with, reqparse
from jormungandr import i_manager
from jormungandr import timezone
from fields import stop_point, route, pagination, PbField, stop_date_time, \
additional_informations, stop_time_properties_links, display_informations_vj, \
display_informations_route, additional_informations_vj, UrisToLinks, error, \
enum_type, SplitDateTime, GeoJson
from ResourceUri import ResourceUri, complete_links
from datetime import datetime
from jormungandr.interfaces.argument import ArgumentDoc
from jormungandr.interfaces.parsers import option_value, date_time_format
from errors import ManageError
from flask.ext.restful.types import natural, boolean
class Schedules(ResourceUri):
parsers = {}
def __init__(self, endpoint):
super(Schedules, self).__init__()
self.endpoint = endpoint
self.parsers["get"] = reqparse.RequestParser(
argument_class=ArgumentDoc)
parser_get = self.parsers["get"]
parser_get.add_argument("filter", type=str)
parser_get.add_argument("from_datetime", type=date_time_format,
description="The datetime from which you want\
the schedules")
parser_get.add_argument("duration", type=int, default=3600 * 24,
description="Maximum duration between datetime\
and the retrieved stop time")
parser_get.add_argument("depth", type=int, default=2)
parser_get.add_argument("count", type=int, default=10,
description="Number of schedules per page")
parser_get.add_argument("start_page", type=int, default=0,
description="The current page")
parser_get.add_argument("max_date_times", type=natural, default=10000,
description="Maximum number of schedule per\
stop_point/route")
parser_get.add_argument("forbidden_id[]", type=unicode,
description="forbidden ids",
dest="forbidden_uris[]",
action="append")
parser_get.add_argument("calendar", type=str,
description="Id of the calendar")
parser_get.add_argument("show_codes", type=boolean, default=False,
description="show more identification codes")
self.method_decorators.append(complete_links(self))
def get(self, uri=None, region=None, lon=None, lat=None):
args = self.parsers["get"].parse_args()
args["nb_stoptimes"] = args["count"]
args["interface_version"] = 1
if uri is None:
first_filter = args["filter"].lower().split("and")[0].strip()
parts = first_filter.lower().split("=")
if len(parts) != 2:
error = "Unable to parse filter {filter}"
return {"error": error.format(filter=args["filter"])}, 503
else:
self.region = i_manager.key_of_id(parts[1].strip())
else:
self.collection = 'schedules'
args["filter"] = self.get_filter(uri.split("/"))
self.region = i_manager.get_region(region, lon, lat)
#@TODO: Change to timestamp
if not args["from_datetime"]:
args["from_datetime"] = datetime.now().strftime("%Y%m%dT1337")
else:
args["from_datetime"] = args["from_datetime"].strftime("%Y%m%dT%H%M%S")
timezone.set_request_timezone(self.region)
return i_manager.dispatch(args, self.endpoint,
instance_name=self.region)
date_time = {
"date_time": SplitDateTime(date='date', time='time'),
"additional_informations": additional_informations(),
"links": stop_time_properties_links()
}
row = {
"stop_point": PbField(stop_point),
"date_times": fields.List(fields.Nested(date_time))
}
header = {
"display_informations": PbField(display_informations_vj,
attribute='pt_display_informations'),
"additional_informations": additional_informations_vj(),
"links": UrisToLinks()
}
table_field = {
"rows": fields.List(fields.Nested(row)),
"headers": fields.List(fields.Nested(header))
}
route_schedule_fields = {
"table": PbField(table_field),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations'),
"geojson" : GeoJson()
}
route_schedules = {
"error": PbField(error, attribute='error'),
"route_schedules": fields.List(fields.Nested(route_schedule_fields)),
"pagination": fields.Nested(pagination)
}
class RouteSchedules(Schedules):
def __init__(self):
super(RouteSchedules, self).__init__("route_schedules")
@marshal_with(route_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(RouteSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
stop_schedule = {
"stop_point": PbField(stop_point),
"route": PbField(route, attribute="route"),
"additional_informations": enum_type(attribute="response_status"),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations'),
"date_times": fields.List(fields.Nested(date_time)),
"links": UrisToLinks()
}
stop_schedules = {
"stop_schedules": fields.List(fields.Nested(stop_schedule)),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
class StopSchedules(Schedules):
def __init__(self):
super(StopSchedules, self).__init__("departure_boards")
self.parsers["get"].add_argument("interface_version", type=int,
default=1, hidden=True)
@marshal_with(stop_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(StopSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
passage = {
"route": PbField(route, attribute="vehicle_journey.route"),
"stop_point": PbField(stop_point),
"stop_date_time": PbField(stop_date_time)
}
departures = {
"departures": fields.List(fields.Nested(passage),
attribute="next_departures"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
arrivals = {
"arrivals": fields.List(fields.Nested(passage), attribute="next_arrivals"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
class NextDepartures(Schedules):
def __init__(self):
super(NextDepartures, self).__init__("next_departures")
@marshal_with(departures)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None,
dest="nb_stoptimes"):
return super(NextDepartures, self).get(uri=uri, region=region, lon=lon,
lat=lat)
class NextArrivals(Schedules):
def __init__(self):
super(NextArrivals, self).__init__("next_arrivals")
@marshal_with(arrivals)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(NextArrivals, self).get(uri=uri, region=region, lon=lon,
lat=lat)
|
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from flask.ext.restful import fields, marshal_with, reqparse
from flask import request
from jormungandr import i_manager, utils
from jormungandr import timezone
from fields import stop_point, route, pagination, PbField, stop_date_time, \
additional_informations, stop_time_properties_links, display_informations_vj, \
display_informations_route, UrisToLinks, error, \
enum_type, SplitDateTime, MultiLineString, NonNullList, PbEnum, feed_publisher
from ResourceUri import ResourceUri, complete_links
import datetime
from jormungandr.interfaces.argument import ArgumentDoc
from jormungandr.interfaces.parsers import option_value, date_time_format
from errors import ManageError
from flask.ext.restful.inputs import natural, boolean
from jormungandr.interfaces.v1.fields import DisruptionsField
from jormungandr.resources_utc import ResourceUtc
from make_links import create_external_link
from functools import wraps
from copy import deepcopy
from navitiacommon import response_pb2
class Schedules(ResourceUri, ResourceUtc):
def __init__(self, endpoint):
ResourceUri.__init__(self)
ResourceUtc.__init__(self)
self.endpoint = endpoint
self.parsers = {}
self.parsers["get"] = reqparse.RequestParser(
argument_class=ArgumentDoc)
parser_get = self.parsers["get"]
parser_get.add_argument("filter", type=unicode)
parser_get.add_argument("from_datetime", type=date_time_format,
description="The datetime from which you want\
the schedules", default=None)
parser_get.add_argument("until_datetime", type=date_time_format,
description="The datetime until which you want\
the schedules", default=None)
parser_get.add_argument("duration", type=int, default=3600 * 24,
description="Maximum duration between datetime\
and the retrieved stop time")
parser_get.add_argument("depth", type=int, default=2)
parser_get.add_argument("count", type=int, default=10,
description="Number of schedules per page")
parser_get.add_argument("start_page", type=int, default=0,
description="The current page")
parser_get.add_argument("max_date_times", type=natural, default=10000,
description="Maximum number of schedule per\
stop_point/route")
parser_get.add_argument("forbidden_id[]", type=unicode,
description="DEPRECATED, replaced by forbidden_uris[]",
dest="__temporary_forbidden_id[]",
default=[],
action='append')
parser_get.add_argument("forbidden_uris[]", type=unicode,
description="forbidden uris",
dest="forbidden_uris[]",
default=[],
action='append')
parser_get.add_argument("calendar", type=unicode,
description="Id of the calendar")
parser_get.add_argument("distance", type=int, default=200,
description="Distance range of the query. Used only if a coord is in the query")
parser_get.add_argument("show_codes", type=boolean, default=False,
description="show more identification codes")
#Note: no default param for data freshness, the default depends on the API
parser_get.add_argument("data_freshness",
description='freshness of the data. '
'base_schedule is the long term planned schedule. '
'adapted_schedule is for planned ahead disruptions (strikes, '
'maintenances, ...). '
'realtime is to have the freshest possible data',
type=option_value(['base_schedule', 'adapted_schedule', 'realtime']))
parser_get.add_argument("_current_datetime", type=date_time_format, default=datetime.datetime.utcnow(),
description="The datetime we want to publish the disruptions from."
" Default is the current date and it is mainly used for debug.")
self.method_decorators.append(complete_links(self))
def get(self, uri=None, region=None, lon=None, lat=None):
args = self.parsers["get"].parse_args()
# for retrocompatibility purpose
for forbid_id in args['__temporary_forbidden_id[]']:
args['forbidden_uris[]'].append(forbid_id)
args["nb_stoptimes"] = args["count"]
args["interface_version"] = 1
if uri is None:
first_filter = args["filter"].lower().split("and")[0].strip()
parts = first_filter.lower().split("=")
if len(parts) != 2:
error = "Unable to parse filter {filter}"
return {"error": error.format(filter=args["filter"])}, 503
else:
self.region = i_manager.get_region(object_id=parts[1].strip())
else:
self.collection = 'schedules'
args["filter"] = self.get_filter(uri.split("/"), args)
self.region = i_manager.get_region(region, lon, lat)
timezone.set_request_timezone(self.region)
if not args["from_datetime"] and not args["until_datetime"]:
# no datetime given, default is the current time, and we activate the realtime
args['from_datetime'] = args['_current_datetime']
if args["calendar"]: # if we have a calendar, the dt is only used for sorting, so 00:00 is fine
args['from_datetime'] = args['from_datetime'].replace(hour=0, minute=0)
if not args['data_freshness']:
args['data_freshness'] = 'realtime'
elif not args.get('calendar'):
#if a calendar is given all times will be given in local (because the calendar might span over dst)
if args['from_datetime']:
args['from_datetime'] = self.convert_to_utc(args['from_datetime'])
if args['until_datetime']:
args['until_datetime'] = self.convert_to_utc(args['until_datetime'])
# we save the original datetime for debuging purpose
args['original_datetime'] = args['from_datetime']
if args['from_datetime']:
args['from_datetime'] = utils.date_to_timestamp(args['from_datetime'])
if args['until_datetime']:
args['until_datetime'] = utils.date_to_timestamp(args['until_datetime'])
if not args['data_freshness']:
# The data freshness depends on the API
# for route_schedule, by default we want the base schedule
if self.endpoint == 'route_schedules':
args['data_freshness'] = 'base_schedule'
# for stop_schedule and previous/next departure/arrival, we want the freshest data by default
else:
args['data_freshness'] = 'realtime'
if not args["from_datetime"] and args["until_datetime"]\
and self.endpoint[:4] == "next":
self.endpoint = "previous" + self.endpoint[4:]
self._register_interpreted_parameters(args)
return i_manager.dispatch(args, self.endpoint,
instance_name=self.region)
date_time = {
"date_time": SplitDateTime(date='date', time='time'),
"additional_informations": additional_informations(),
"links": stop_time_properties_links(),
'data_freshness': enum_type(attribute='realtime_level'),
}
row = {
"stop_point": PbField(stop_point),
"date_times": fields.List(fields.Nested(date_time))
}
header = {
"display_informations": PbField(display_informations_vj,
attribute='pt_display_informations'),
"additional_informations": NonNullList(PbEnum(response_pb2.SectionAdditionalInformationType)),
"links": UrisToLinks()
}
table_field = {
"rows": fields.List(fields.Nested(row)),
"headers": fields.List(fields.Nested(header))
}
route_schedule_fields = {
"table": PbField(table_field),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations'),
"links": UrisToLinks(),
"geojson": MultiLineString()
}
route_schedules = {
"error": PbField(error, attribute='error'),
"route_schedules": fields.List(fields.Nested(route_schedule_fields)),
"pagination": fields.Nested(pagination),
"disruptions": DisruptionsField,
"feed_publishers": fields.List(fields.Nested(feed_publisher))
}
class RouteSchedules(Schedules):
def __init__(self):
super(RouteSchedules, self).__init__("route_schedules")
@marshal_with(route_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(RouteSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
stop_schedule = {
"stop_point": PbField(stop_point),
"route": PbField(route, attribute="route"),
"additional_informations": enum_type(attribute="response_status"),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations'),
"date_times": fields.List(fields.Nested(date_time)),
"links": UrisToLinks()
}
stop_schedules = {
"stop_schedules": fields.List(fields.Nested(stop_schedule)),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error'),
"disruptions": DisruptionsField,
"feed_publishers": fields.List(fields.Nested(feed_publisher))
}
class StopSchedules(Schedules):
def __init__(self):
super(StopSchedules, self).__init__("departure_boards")
self.parsers["get"].add_argument("interface_version", type=int,
default=1, hidden=True)
@marshal_with(stop_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(StopSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
passage = {
"route": PbField(route),
"stop_point": PbField(stop_point),
"stop_date_time": PbField(stop_date_time),
"display_informations": PbField(display_informations_vj,
attribute='pt_display_informations'),
}
departures = {
"departures": fields.List(fields.Nested(passage),
attribute="next_departures"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error'),
"disruptions": DisruptionsField,
"feed_publishers": fields.List(fields.Nested(feed_publisher))
}
arrivals = {
"arrivals": fields.List(fields.Nested(passage), attribute="next_arrivals"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error'),
"disruptions": DisruptionsField,
"feed_publishers": fields.List(fields.Nested(feed_publisher))
}
class add_passages_links:
"""
delete disruption links and put the disruptions directly in the owner objets
TEMPORARY: delete this as soon as the front end has the new disruptions integrated
"""
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
response, status, other = f(*args, **kwargs)
api = "departures" if "departures" in response else "arrivals" if "arrivals" in response else None
if not api:
return response, status, other
passages = response[api]
max_dt = "19000101T000000"
min_dt = "29991231T235959"
time_field = "arrival_date_time" if api == "arrivals" else "departure_date_time"
for passage_ in passages:
dt = passage_["stop_date_time"][time_field]
if min_dt > dt:
min_dt = dt
if max_dt < dt:
max_dt = dt
if "links" not in response:
response["links"] = []
kwargs_links = dict(deepcopy(request.args))
if "region" in kwargs:
kwargs_links["region"] = kwargs["region"]
if "uri" in kwargs:
kwargs_links["uri"] = kwargs["uri"]
if 'from_datetime' in kwargs_links:
kwargs_links.pop('from_datetime')
delta = datetime.timedelta(seconds=1)
dt = datetime.datetime.strptime(min_dt, "%Y%m%dT%H%M%S")
if 'until_datetime' in kwargs.get("uri"):
kwargs_links['until_datetime'] = (dt - delta).strftime("%Y%m%dT%H%M%S")
response["links"].append(create_external_link("v1."+api, rel="prev", _type=api, **kwargs_links))
kwargs_links.pop('until_datetime')
if 'from_datatime' in kwargs.get("uri"):
kwargs_links['from_datetime'] = (datetime.datetime.strptime(max_dt, "%Y%m%dT%H%M%S") + delta).strftime("%Y%m%dT%H%M%S")
response["links"].append(create_external_link("v1."+api, rel="next", _type=api, **kwargs_links))
return response, status, other
return wrapper
class NextDepartures(Schedules):
def __init__(self):
super(NextDepartures, self).__init__("next_departures")
@add_passages_links()
@marshal_with(departures)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None,
dest="nb_stoptimes"):
return super(NextDepartures, self).get(uri=uri, region=region, lon=lon,
lat=lat)
class NextArrivals(Schedules):
def __init__(self):
super(NextArrivals, self).__init__("next_arrivals")
@add_passages_links()
@marshal_with(arrivals)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(NextArrivals, self).get(uri=uri, region=region, lon=lon,
lat=lat)
use full_path
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from flask.ext.restful import fields, marshal_with, reqparse
from flask import request
from jormungandr import i_manager, utils
from jormungandr import timezone
from fields import stop_point, route, pagination, PbField, stop_date_time, \
additional_informations, stop_time_properties_links, display_informations_vj, \
display_informations_route, UrisToLinks, error, \
enum_type, SplitDateTime, MultiLineString, NonNullList, PbEnum, feed_publisher
from ResourceUri import ResourceUri, complete_links
import datetime
from jormungandr.interfaces.argument import ArgumentDoc
from jormungandr.interfaces.parsers import option_value, date_time_format
from errors import ManageError
from flask.ext.restful.inputs import natural, boolean
from jormungandr.interfaces.v1.fields import DisruptionsField
from jormungandr.resources_utc import ResourceUtc
from make_links import create_external_link
from functools import wraps
from copy import deepcopy
from navitiacommon import response_pb2
class Schedules(ResourceUri, ResourceUtc):
def __init__(self, endpoint):
ResourceUri.__init__(self)
ResourceUtc.__init__(self)
self.endpoint = endpoint
self.parsers = {}
self.parsers["get"] = reqparse.RequestParser(
argument_class=ArgumentDoc)
parser_get = self.parsers["get"]
parser_get.add_argument("filter", type=unicode)
parser_get.add_argument("from_datetime", type=date_time_format,
description="The datetime from which you want\
the schedules", default=None)
parser_get.add_argument("until_datetime", type=date_time_format,
description="The datetime until which you want\
the schedules", default=None)
parser_get.add_argument("duration", type=int, default=3600 * 24,
description="Maximum duration between datetime\
and the retrieved stop time")
parser_get.add_argument("depth", type=int, default=2)
parser_get.add_argument("count", type=int, default=10,
description="Number of schedules per page")
parser_get.add_argument("start_page", type=int, default=0,
description="The current page")
parser_get.add_argument("max_date_times", type=natural, default=10000,
description="Maximum number of schedule per\
stop_point/route")
parser_get.add_argument("forbidden_id[]", type=unicode,
description="DEPRECATED, replaced by forbidden_uris[]",
dest="__temporary_forbidden_id[]",
default=[],
action='append')
parser_get.add_argument("forbidden_uris[]", type=unicode,
description="forbidden uris",
dest="forbidden_uris[]",
default=[],
action='append')
parser_get.add_argument("calendar", type=unicode,
description="Id of the calendar")
parser_get.add_argument("distance", type=int, default=200,
description="Distance range of the query. Used only if a coord is in the query")
parser_get.add_argument("show_codes", type=boolean, default=False,
description="show more identification codes")
#Note: no default param for data freshness, the default depends on the API
parser_get.add_argument("data_freshness",
description='freshness of the data. '
'base_schedule is the long term planned schedule. '
'adapted_schedule is for planned ahead disruptions (strikes, '
'maintenances, ...). '
'realtime is to have the freshest possible data',
type=option_value(['base_schedule', 'adapted_schedule', 'realtime']))
parser_get.add_argument("_current_datetime", type=date_time_format, default=datetime.datetime.utcnow(),
description="The datetime we want to publish the disruptions from."
" Default is the current date and it is mainly used for debug.")
self.method_decorators.append(complete_links(self))
def get(self, uri=None, region=None, lon=None, lat=None):
args = self.parsers["get"].parse_args()
# for retrocompatibility purpose
for forbid_id in args['__temporary_forbidden_id[]']:
args['forbidden_uris[]'].append(forbid_id)
args["nb_stoptimes"] = args["count"]
args["interface_version"] = 1
if uri is None:
first_filter = args["filter"].lower().split("and")[0].strip()
parts = first_filter.lower().split("=")
if len(parts) != 2:
error = "Unable to parse filter {filter}"
return {"error": error.format(filter=args["filter"])}, 503
else:
self.region = i_manager.get_region(object_id=parts[1].strip())
else:
self.collection = 'schedules'
args["filter"] = self.get_filter(uri.split("/"), args)
self.region = i_manager.get_region(region, lon, lat)
timezone.set_request_timezone(self.region)
if not args["from_datetime"] and not args["until_datetime"]:
# no datetime given, default is the current time, and we activate the realtime
args['from_datetime'] = args['_current_datetime']
if args["calendar"]: # if we have a calendar, the dt is only used for sorting, so 00:00 is fine
args['from_datetime'] = args['from_datetime'].replace(hour=0, minute=0)
if not args['data_freshness']:
args['data_freshness'] = 'realtime'
elif not args.get('calendar'):
#if a calendar is given all times will be given in local (because the calendar might span over dst)
if args['from_datetime']:
args['from_datetime'] = self.convert_to_utc(args['from_datetime'])
if args['until_datetime']:
args['until_datetime'] = self.convert_to_utc(args['until_datetime'])
# we save the original datetime for debuging purpose
args['original_datetime'] = args['from_datetime']
if args['from_datetime']:
args['from_datetime'] = utils.date_to_timestamp(args['from_datetime'])
if args['until_datetime']:
args['until_datetime'] = utils.date_to_timestamp(args['until_datetime'])
if not args['data_freshness']:
# The data freshness depends on the API
# for route_schedule, by default we want the base schedule
if self.endpoint == 'route_schedules':
args['data_freshness'] = 'base_schedule'
# for stop_schedule and previous/next departure/arrival, we want the freshest data by default
else:
args['data_freshness'] = 'realtime'
if not args["from_datetime"] and args["until_datetime"]\
and self.endpoint[:4] == "next":
self.endpoint = "previous" + self.endpoint[4:]
self._register_interpreted_parameters(args)
return i_manager.dispatch(args, self.endpoint,
instance_name=self.region)
date_time = {
"date_time": SplitDateTime(date='date', time='time'),
"additional_informations": additional_informations(),
"links": stop_time_properties_links(),
'data_freshness': enum_type(attribute='realtime_level'),
}
row = {
"stop_point": PbField(stop_point),
"date_times": fields.List(fields.Nested(date_time))
}
header = {
"display_informations": PbField(display_informations_vj,
attribute='pt_display_informations'),
"additional_informations": NonNullList(PbEnum(response_pb2.SectionAdditionalInformationType)),
"links": UrisToLinks()
}
table_field = {
"rows": fields.List(fields.Nested(row)),
"headers": fields.List(fields.Nested(header))
}
route_schedule_fields = {
"table": PbField(table_field),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations'),
"links": UrisToLinks(),
"geojson": MultiLineString()
}
route_schedules = {
"error": PbField(error, attribute='error'),
"route_schedules": fields.List(fields.Nested(route_schedule_fields)),
"pagination": fields.Nested(pagination),
"disruptions": DisruptionsField,
"feed_publishers": fields.List(fields.Nested(feed_publisher))
}
class RouteSchedules(Schedules):
def __init__(self):
super(RouteSchedules, self).__init__("route_schedules")
@marshal_with(route_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(RouteSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
stop_schedule = {
"stop_point": PbField(stop_point),
"route": PbField(route, attribute="route"),
"additional_informations": enum_type(attribute="response_status"),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations'),
"date_times": fields.List(fields.Nested(date_time)),
"links": UrisToLinks()
}
stop_schedules = {
"stop_schedules": fields.List(fields.Nested(stop_schedule)),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error'),
"disruptions": DisruptionsField,
"feed_publishers": fields.List(fields.Nested(feed_publisher))
}
class StopSchedules(Schedules):
def __init__(self):
super(StopSchedules, self).__init__("departure_boards")
self.parsers["get"].add_argument("interface_version", type=int,
default=1, hidden=True)
@marshal_with(stop_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(StopSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
passage = {
"route": PbField(route),
"stop_point": PbField(stop_point),
"stop_date_time": PbField(stop_date_time),
"display_informations": PbField(display_informations_vj,
attribute='pt_display_informations'),
}
departures = {
"departures": fields.List(fields.Nested(passage),
attribute="next_departures"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error'),
"disruptions": DisruptionsField,
"feed_publishers": fields.List(fields.Nested(feed_publisher))
}
arrivals = {
"arrivals": fields.List(fields.Nested(passage), attribute="next_arrivals"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error'),
"disruptions": DisruptionsField,
"feed_publishers": fields.List(fields.Nested(feed_publisher))
}
class add_passages_links:
"""
delete disruption links and put the disruptions directly in the owner objets
TEMPORARY: delete this as soon as the front end has the new disruptions integrated
"""
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
response, status, other = f(*args, **kwargs)
api = "departures" if "departures" in response else "arrivals" if "arrivals" in response else None
if not api:
return response, status, other
passages = response[api]
max_dt = "19000101T000000"
min_dt = "29991231T235959"
time_field = "arrival_date_time" if api == "arrivals" else "departure_date_time"
for passage_ in passages:
dt = passage_["stop_date_time"][time_field]
if min_dt > dt:
min_dt = dt
if max_dt < dt:
max_dt = dt
if "links" not in response:
response["links"] = []
kwargs_links = dict(deepcopy(request.args))
if "region" in kwargs:
kwargs_links["region"] = kwargs["region"]
if "uri" in kwargs:
kwargs_links["uri"] = kwargs["uri"]
if 'from_datetime' in kwargs_links:
kwargs_links.pop('from_datetime')
delta = datetime.timedelta(seconds=1)
dt = datetime.datetime.strptime(min_dt, "%Y%m%dT%H%M%S")
if 'until_datetime' in getattr(request, 'full_path'):
kwargs_links['until_datetime'] = (dt - delta).strftime("%Y%m%dT%H%M%S")
response["links"].append(create_external_link("v1."+api, rel="prev", _type=api, **kwargs_links))
kwargs_links.pop('until_datetime')
if 'from_datatime' in getattr(request, 'full_path'):
kwargs_links['from_datetime'] = (datetime.datetime.strptime(max_dt, "%Y%m%dT%H%M%S") + delta).strftime("%Y%m%dT%H%M%S")
response["links"].append(create_external_link("v1."+api, rel="next", _type=api, **kwargs_links))
return response, status, other
return wrapper
class NextDepartures(Schedules):
def __init__(self):
super(NextDepartures, self).__init__("next_departures")
@add_passages_links()
@marshal_with(departures)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None,
dest="nb_stoptimes"):
return super(NextDepartures, self).get(uri=uri, region=region, lon=lon,
lat=lat)
class NextArrivals(Schedules):
def __init__(self):
super(NextArrivals, self).__init__("next_arrivals")
@add_passages_links()
@marshal_with(arrivals)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(NextArrivals, self).get(uri=uri, region=region, lon=lon,
lat=lat)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import requests
from requests_toolbelt import MultipartEncoder
from upstream.chunk import Chunk
from upstream.exc import FileError, ResponseError, ConnectError
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
try:
from urllib2 import urlopen, URLError
except ImportError:
from urllib.retrieve import urlopen, URLError
class Streamer(object):
def __init__(self, server):
""" For uploading and downloading files from Metadisk.
:param server: URL to the Metadisk server
"""
self.server = server
self.check_connectivity()
def check_connectivity(self):
"""
Check to see if we even get a connection to the server.
https://stackoverflow.com/questions/3764291/checking-network-connection
"""
try:
urlopen(self.server, timeout=1)
except URLError:
raise ConnectError("Could not connect to server.")
def upload(self, filepath):
""" Uploads a chunk via POST to the specified node
to the web-core API. See API docs:
https://github.com/Storj/web-core#api-documentation
:param filepath: Path to file as a string
:return: upstream.chunk.Chunk
:raise LookupError: IF
"""
# Open the file and upload it via POST
url = self.server + "/api/upload" # web-core API
r = self._upload_form_encoded(url, filepath)
# Make sure that the API call is actually there
if r.status_code == 404:
raise ResponseError("API call not found.")
elif r.status_code == 402:
raise ResponseError("Payment required.")
elif r.status_code == 500:
raise ResponseError("Server error.")
elif r.status_code == 201:
# Everthing checked out, return result
# based on the format selected
chunk = Chunk()
return chunk.load_json(r.text)
else:
raise ResponseError("Received status code %s %s"
% (r.status_code, r.reason))
def download(self, chunk_list, shredder_data=None, destination=""):
"""Download a chunk via GET from the specified node.
:param chunk_list:
:param shredder_data:
:param destination:
"""
if len(chunk_list) <= 0:
pass
elif len(chunk_list) == 1:
self._download_chunk(chunk_list[0], destination)
else:
for chunk in chunk_list:
self._download_chunk(chunk, "download/" + chunk.filename)
shredder_data.merge_chunks()
def check_path(self, filepath):
""" Expands and validates a given path to a file and returns it
:param filepath: Path to file as string
:return: Expanded validated path as string
:raise FileError: If path is not a file or does not exist
"""
expandedpath = os.path.expanduser(filepath)
try:
assert os.path.isfile(expandedpath)
except AssertionError:
raise FileError("%s not a file or not found" % filepath)
return expandedpath
def _upload_form_encoded(self, url, filepath):
""" Streams file from disk and uploads it.
:param url: API endpoint as URL to upload to
:param filepath: Path to file as string
:return: requests.Response
"""
validpath = self.check_path(filepath)
m = MultipartEncoder(
{
'file': ('testfile', open(validpath, 'rb'))
}
)
headers = {
'Content-Type': m.content_type
}
return requests.post(url, data=m, headers=headers)
def _upload_chunked_encoded(self, url, filepath):
""" Uploads a file using chunked transfer encoding.
web-core does not currently accept this type of uploads because
of issues in upstream projects, primariy flask and werkzeug. Leaving
it here for posterity as it might be useful in the future.
This function currently rases a NotImplementedError currently becuase
it's purposely "deactivated".
:param url: API endpoint as URL to upload to
:param filepath: Path to file as string
:return: requests.Response
:raise NotImplementedError: Raises this error on any call.
"""
# validpath = self._check_path(filepath)
# return requests.post(url, data=self._filestream(validpath))
raise NotImplementedError
def _download_chunk(self, chunk, destination=""):
""" Download a chunk via GET from the specified node.
https://github.com/storj/web-core
:param chunk: Information about the chunk to download.
:param destination: Path where we store the file.
"""
# Generate request URL
if chunk.decryptkey == "":
url = self.server + "/api/download/" + chunk.filehash
else:
url = self.server + "/api/download/" + chunk.get_uri()
# Retrieve chunk from the server and pass it the default file directory
# or override it to a particular place
if destination == "":
return urlretrieve(url, "files/" + chunk.filehash)
else:
return urlretrieve(url, destination)
def _filestream(self, filepath):
""" Streaming file generator
:param filepath: Path to file to stream
:raise FileError: If path is not valid
"""
expandedpath = os.path.expanduser(filepath)
try:
os.path.isfile(expandedpath)
except AssertionError:
raise FileError("%s not a file or not found" % filepath)
with open(expandedpath, 'rb') as f:
while True:
chunk = f.read(self.chunk_size)
if not chunk:
break
yield chunk
Use the damn chunkerror
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import requests
from requests_toolbelt import MultipartEncoder
from upstream.chunk import Chunk
from upstream.exc import FileError, ResponseError, ConnectError, ChunkError
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
try:
from urllib2 import urlopen, URLError
except ImportError:
from urllib.retrieve import urlopen, URLError
class Streamer(object):
def __init__(self, server):
""" For uploading and downloading files from Metadisk.
:param server: URL to the Metadisk server
"""
self.server = server
self.check_connectivity()
def check_connectivity(self):
"""
Check to see if we even get a connection to the server.
https://stackoverflow.com/questions/3764291/checking-network-connection
"""
try:
urlopen(self.server, timeout=1)
except URLError:
raise ConnectError("Could not connect to server.")
def upload(self, filepath):
""" Uploads a chunk via POST to the specified node
to the web-core API. See API docs:
https://github.com/Storj/web-core#api-documentation
:param filepath: Path to file as a string
:return: upstream.chunk.Chunk
:raise LookupError: IF
"""
# Open the file and upload it via POST
url = self.server + "/api/upload" # web-core API
r = self._upload_form_encoded(url, filepath)
# Make sure that the API call is actually there
if r.status_code == 404:
raise ResponseError("API call not found.")
elif r.status_code == 402:
raise ResponseError("Payment required.")
elif r.status_code == 500:
raise ResponseError("Server error.")
elif r.status_code == 201:
# Everthing checked out, return result
# based on the format selected
chunk = Chunk()
return chunk.load_json(r.text)
else:
raise ResponseError("Received status code %s %s"
% (r.status_code, r.reason))
def download(self, chunk_list, shredder_data=None, destination=""):
"""Download a chunk via GET from the specified node.
:param chunk_list:
:param shredder_data:
:param destination:
"""
if len(chunk_list) <= 0:
pass
elif len(chunk_list) == 1:
self._download_chunk(chunk_list[0], destination)
else:
for chunk in chunk_list:
self._download_chunk(chunk, "download/" + chunk.filename)
shredder_data.merge_chunks()
def check_path(self, filepath):
""" Expands and validates a given path to a file and returns it
:param filepath: Path to file as string
:return: Expanded validated path as string
:raise FileError: If path is not a file or does not exist
"""
expandedpath = os.path.expanduser(filepath)
try:
assert os.path.isfile(expandedpath)
except AssertionError:
raise FileError("%s not a file or not found" % filepath)
return expandedpath
def _upload_form_encoded(self, url, filepath):
""" Streams file from disk and uploads it.
:param url: API endpoint as URL to upload to
:param filepath: Path to file as string
:return: requests.Response
"""
validpath = self.check_path(filepath)
m = MultipartEncoder(
{
'file': ('testfile', open(validpath, 'rb'))
}
)
headers = {
'Content-Type': m.content_type
}
return requests.post(url, data=m, headers=headers)
def _upload_chunked_encoded(self, url, filepath):
""" Uploads a file using chunked transfer encoding.
web-core does not currently accept this type of uploads because
of issues in upstream projects, primariy flask and werkzeug. Leaving
it here for posterity as it might be useful in the future.
This function currently rases a NotImplementedError currently becuase
it's purposely "deactivated".
:param url: API endpoint as URL to upload to
:param filepath: Path to file as string
:return: requests.Response
:raise NotImplementedError: Raises this error on any call.
"""
# validpath = self._check_path(filepath)
# return requests.post(url, data=self._filestream(validpath))
raise NotImplementedError
def _download_chunk(self, chunk, destination=""):
""" Download a chunk via GET from the specified node.
https://github.com/storj/web-core
:param chunk: Information about the chunk to download.
:param destination: Path where we store the file.
"""
# Generate request URL
if chunk.decryptkey == "":
url = self.server + "/api/download/" + chunk.filehash
else:
url = self.server + "/api/download/" + chunk.get_uri()
# Retrieve chunk from the server and pass it the default file directory
# or override it to a particular place
if destination == "":
return urlretrieve(url, "files/" + chunk.filehash)
else:
return urlretrieve(url, destination)
def _filestream(self, filepath):
""" Streaming file generator
:param filepath: Path to file to stream
:raise FileError: If path is not valid
"""
expandedpath = os.path.expanduser(filepath)
try:
os.path.isfile(expandedpath)
except AssertionError:
raise FileError("%s not a file or not found" % filepath)
with open(expandedpath, 'rb') as f:
while True:
chunk = f.read(self.chunk_size)
if not chunk:
break
yield chunk
|
# coding=utf-8
from flask.ext.restful import fields, marshal_with, reqparse
from jormungandr import i_manager
from fields import stop_point, route, pagination, PbField, stop_date_time, \
additional_informations, stop_time_properties_links, display_informations_vj, \
display_informations_route, additional_informations_vj, UrisToLinks, error, \
enum_type
from ResourceUri import ResourceUri, add_notes, add_exception_dates
from datetime import datetime
from jormungandr.interfaces.argument import ArgumentDoc
from errors import ManageError
from flask.ext.restful.types import natural
class Schedules(ResourceUri):
parsers = {}
def __init__(self, endpoint):
super(Schedules, self).__init__()
self.endpoint = endpoint
self.parsers["get"] = reqparse.RequestParser(
argument_class=ArgumentDoc)
parser_get = self.parsers["get"]
parser_get.add_argument("filter", type=str)
parser_get.add_argument("from_datetime", type=str,
description="The datetime from which you want\
the schedules")
parser_get.add_argument("duration", type=int, default=3600 * 24,
description="Maximum duration between datetime\
and the retrieved stop time")
parser_get.add_argument("depth", type=int, default=2)
parser_get.add_argument("count", type=int, default=10,
description="Number of schedules per page")
parser_get.add_argument("start_page", type=int, default=0,
description="The current page")
parser_get.add_argument("max_date_times", type=natural,
description="Maximum number of schedule per\
stop_point/route")
parser_get.add_argument("forbidden_id[]", type=unicode,
description="forbidden ids",
dest="forbidden_uris[]",
action="append")
parser_get.add_argument("calendar", type=str,
description="Id of the calendar")
self.method_decorators.append(add_notes(self))
self.method_decorators.append(add_exception_dates(self))
def get(self, uri=None, region=None, lon=None, lat=None):
args = self.parsers["get"].parse_args()
args["nb_stoptimes"] = args["count"]
args["interface_version"] = 1
if uri is None:
first_filter = args["filter"].lower().split("and")[0].strip()
parts = first_filter.lower().split("=")
if len(parts) != 2:
error = "Unable to parse filter {filter}"
return {"error": error.format(filter=args["filter"])}, 503
else:
self.region = i_manager.key_of_id(parts[1].strip())
else:
self.collection = 'schedules'
args["filter"] = self.get_filter(uri.split("/"))
self.region = i_manager.get_region(region, lon, lat)
if not args["from_datetime"]:
args["from_datetime"] = datetime.now().strftime("%Y%m%dT1337")
return i_manager.dispatch(args, self.endpoint,
instance_name=self.region)
date_time = {
"date_time": fields.String(),
"additional_informations": additional_informations(),
"links": stop_time_properties_links(),
"status" : enum_type(attribute="response_status")
}
row = {
"stop_point": PbField(stop_point),
"date_times": fields.List(fields.Nested(date_time))
}
header = {
"display_informations": PbField(display_informations_vj,
attribute='pt_display_informations'),
"additional_informations": additional_informations_vj(),
"links": UrisToLinks()
}
table_field = {
"rows": fields.List(fields.Nested(row)),
"headers": fields.List(fields.Nested(header))
}
route_schedule_fields = {
"table": PbField(table_field),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations')
}
route_schedules = {
"error": PbField(error, attribute='error'),
"route_schedules": fields.List(fields.Nested(route_schedule_fields)),
"pagination": fields.Nested(pagination)
}
class RouteSchedules(Schedules):
def __init__(self):
super(RouteSchedules, self).__init__("route_schedules")
@marshal_with(route_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(RouteSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
stop_schedule = {
"stop_point": PbField(stop_point),
"route": PbField(route, attribute="route"),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations'),
"date_times": fields.List(fields.Nested(date_time)),
"links": UrisToLinks()
}
stop_schedules = {
"stop_schedules": fields.List(fields.Nested(stop_schedule)),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
class StopSchedules(Schedules):
def __init__(self):
super(StopSchedules, self).__init__("departure_boards")
self.parsers["get"].add_argument("interface_version", type=int,
default=1, hidden=True)
@marshal_with(stop_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(StopSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
passage = {
"route": PbField(route, attribute="vehicle_journey.route"),
"stop_point": PbField(stop_point),
"stop_date_time": PbField(stop_date_time)
}
departures = {
"departures": fields.List(fields.Nested(passage),
attribute="next_departures"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
arrivals = {
"arrivals": fields.List(fields.Nested(passage), attribute="next_arrivals"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
class NextDepartures(Schedules):
def __init__(self):
super(NextDepartures, self).__init__("next_departures")
@marshal_with(departures)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None,
dest="nb_stoptimes"):
return super(NextDepartures, self).get(uri=uri, region=region, lon=lon,
lat=lat)
class NextArrivals(Schedules):
def __init__(self):
super(NextArrivals, self).__init__("next_arrivals")
@marshal_with(arrivals)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(NextArrivals, self).get(uri=uri, region=region, lon=lon,
lat=lat)
jormungandr : manage terminus on stop_schedule
# coding=utf-8
from flask.ext.restful import fields, marshal_with, reqparse
from jormungandr import i_manager
from fields import stop_point, route, pagination, PbField, stop_date_time, \
additional_informations, stop_time_properties_links, display_informations_vj, \
display_informations_route, additional_informations_vj, UrisToLinks, error, \
enum_type
from ResourceUri import ResourceUri, add_notes, add_exception_dates
from datetime import datetime
from jormungandr.interfaces.argument import ArgumentDoc
from errors import ManageError
from flask.ext.restful.types import natural
class Schedules(ResourceUri):
parsers = {}
def __init__(self, endpoint):
super(Schedules, self).__init__()
self.endpoint = endpoint
self.parsers["get"] = reqparse.RequestParser(
argument_class=ArgumentDoc)
parser_get = self.parsers["get"]
parser_get.add_argument("filter", type=str)
parser_get.add_argument("from_datetime", type=str,
description="The datetime from which you want\
the schedules")
parser_get.add_argument("duration", type=int, default=3600 * 24,
description="Maximum duration between datetime\
and the retrieved stop time")
parser_get.add_argument("depth", type=int, default=2)
parser_get.add_argument("count", type=int, default=10,
description="Number of schedules per page")
parser_get.add_argument("start_page", type=int, default=0,
description="The current page")
parser_get.add_argument("max_date_times", type=natural,
description="Maximum number of schedule per\
stop_point/route")
parser_get.add_argument("forbidden_id[]", type=unicode,
description="forbidden ids",
dest="forbidden_uris[]",
action="append")
parser_get.add_argument("calendar", type=str,
description="Id of the calendar")
self.method_decorators.append(add_notes(self))
self.method_decorators.append(add_exception_dates(self))
def get(self, uri=None, region=None, lon=None, lat=None):
args = self.parsers["get"].parse_args()
args["nb_stoptimes"] = args["count"]
args["interface_version"] = 1
if uri is None:
first_filter = args["filter"].lower().split("and")[0].strip()
parts = first_filter.lower().split("=")
if len(parts) != 2:
error = "Unable to parse filter {filter}"
return {"error": error.format(filter=args["filter"])}, 503
else:
self.region = i_manager.key_of_id(parts[1].strip())
else:
self.collection = 'schedules'
args["filter"] = self.get_filter(uri.split("/"))
self.region = i_manager.get_region(region, lon, lat)
if not args["from_datetime"]:
args["from_datetime"] = datetime.now().strftime("%Y%m%dT1337")
return i_manager.dispatch(args, self.endpoint,
instance_name=self.region)
date_time = {
"date_time": fields.String(),
"additional_informations": additional_informations(),
"links": stop_time_properties_links()
}
row = {
"stop_point": PbField(stop_point),
"date_times": fields.List(fields.Nested(date_time))
}
header = {
"display_informations": PbField(display_informations_vj,
attribute='pt_display_informations'),
"additional_informations": additional_informations_vj(),
"links": UrisToLinks()
}
table_field = {
"rows": fields.List(fields.Nested(row)),
"headers": fields.List(fields.Nested(header))
}
route_schedule_fields = {
"table": PbField(table_field),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations')
}
route_schedules = {
"error": PbField(error, attribute='error'),
"route_schedules": fields.List(fields.Nested(route_schedule_fields)),
"pagination": fields.Nested(pagination)
}
class RouteSchedules(Schedules):
def __init__(self):
super(RouteSchedules, self).__init__("route_schedules")
@marshal_with(route_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(RouteSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
stop_schedule = {
"stop_point": PbField(stop_point),
"route": PbField(route, attribute="route"),
"additional_informations": enum_type(attribute="response_status"),
"display_informations": PbField(display_informations_route,
attribute='pt_display_informations'),
"date_times": fields.List(fields.Nested(date_time)),
"links": UrisToLinks()
}
stop_schedules = {
"stop_schedules": fields.List(fields.Nested(stop_schedule)),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
class StopSchedules(Schedules):
def __init__(self):
super(StopSchedules, self).__init__("departure_boards")
self.parsers["get"].add_argument("interface_version", type=int,
default=1, hidden=True)
@marshal_with(stop_schedules)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(StopSchedules, self).get(uri=uri, region=region, lon=lon,
lat=lat)
passage = {
"route": PbField(route, attribute="vehicle_journey.route"),
"stop_point": PbField(stop_point),
"stop_date_time": PbField(stop_date_time)
}
departures = {
"departures": fields.List(fields.Nested(passage),
attribute="next_departures"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
arrivals = {
"arrivals": fields.List(fields.Nested(passage), attribute="next_arrivals"),
"pagination": fields.Nested(pagination),
"error": PbField(error, attribute='error')
}
class NextDepartures(Schedules):
def __init__(self):
super(NextDepartures, self).__init__("next_departures")
@marshal_with(departures)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None,
dest="nb_stoptimes"):
return super(NextDepartures, self).get(uri=uri, region=region, lon=lon,
lat=lat)
class NextArrivals(Schedules):
def __init__(self):
super(NextArrivals, self).__init__("next_arrivals")
@marshal_with(arrivals)
@ManageError()
def get(self, uri=None, region=None, lon=None, lat=None):
return super(NextArrivals, self).get(uri=uri, region=region, lon=lon,
lat=lat)
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import sys
import copy
import sysconfig
import itertools
from functools import lru_cache
from pathlib import PurePath, Path
from cerbero import enums
from cerbero.errors import FatalError, ConfigurationError
from cerbero.utils import _, system_info, validate_packager, to_unixpath,\
shell, parse_file, detect_qt5
from cerbero.utils import messages as m
from cerbero.ide.vs.env import get_vs_version
CONFIG_DIR = os.path.expanduser('~/.cerbero')
CONFIG_EXT = 'cbc'
DEFAULT_CONFIG_FILENAME = 'cerbero.%s' % CONFIG_EXT
DEFAULT_CONFIG_FILE = os.path.join(CONFIG_DIR, DEFAULT_CONFIG_FILENAME)
DEFAULT_GIT_ROOT = 'https://gitlab.freedesktop.org/gstreamer'
DEFAULT_ALLOW_PARALLEL_BUILD = True
DEFAULT_PACKAGER = "Default <default@change.me>"
CERBERO_UNINSTALLED = 'CERBERO_UNINSTALLED'
DEFAULT_MIRRORS = ['https://gstreamer.freedesktop.org/src/mirror/']
Platform = enums.Platform
Architecture = enums.Architecture
Distro = enums.Distro
DistroVersion = enums.DistroVersion
License = enums.License
LibraryType = enums.LibraryType
def set_nofile_ulimit():
'''
Some newer toolchains such as our GCC 8.2 cross toolchain exceed the
1024 file ulimit, so let's increase it.
See: https://gitlab.freedesktop.org/gstreamer/cerbero/issues/165
'''
try:
import resource
except ImportError:
return
want = 2048
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < want or hard < want:
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (want, want))
except (OSError, ValueError):
print('Failed to increase file ulimit, you may see linker failures')
class Variants(object):
__disabled_variants = ['x11', 'alsa', 'pulse', 'cdparanoia', 'v4l2',
'gi', 'unwind', 'rpi', 'visualstudio', 'qt5',
'intelmsdk', 'nvcodec', 'python', 'werror', 'vaapi']
__enabled_variants = ['debug', 'testspackage']
__all_variants = __enabled_variants + __disabled_variants
def __init__(self, variants):
for v in self.__enabled_variants:
setattr(self, v, True)
for v in self.__disabled_variants:
setattr(self, v, False)
for v in variants:
if v.startswith('no'):
if v[2:] not in self.__all_variants:
m.warning('Variant {} is unknown or obsolete'.format(v[2:]))
setattr(self, v[2:], False)
else:
if v not in self.__all_variants:
m.warning('Variant {} is unknown or obsolete'.format(v))
setattr(self, v, True)
def __getattr__(self, name):
try:
if name.startswith('no'):
return not object.__getattribute__(self, name[2:])
else:
return object.__getattribute__(self, name)
except Exception:
raise AttributeError("%s is not a known variant" % name)
def __repr__(self):
return '<Variants: {}>'.format(self.__dict__)
def all(self):
return sorted(self.__all_variants)
class Config (object):
_properties = ['platform', 'target_platform', 'arch', 'target_arch',
'prefix', 'recipes_dir', 'host', 'build', 'target',
'sources', 'local_sources', 'lib_suffix', 'git_root',
'distro', 'target_distro', 'environ_dir', 'cache_file',
'toolchain_prefix', 'toolchain_version', 'distro_version',
'target_distro_version', 'allow_system_libs',
'packages_dir', 'py_prefix', 'logs',
'install_dir', 'allow_parallel_build', 'num_of_cpus',
'use_configure_cache', 'packages_prefix', 'packager',
'data_dir', 'min_osx_sdk_version', 'external_recipes',
'external_packages', 'use_ccache', 'force_git_commit',
'universal_archs', 'osx_target_sdk_version', 'variants',
'build_tools_prefix', 'build_tools_sources',
'build_tools_cache', 'home_dir', 'recipes_commits',
'recipes_remotes', 'ios_platform', 'extra_build_tools',
'distro_packages_install', 'interactive', 'bash_completions',
'target_arch_flags', 'sysroot', 'isysroot',
'extra_lib_path', 'cached_sources', 'tools_prefix',
'ios_min_version', 'toolchain_path', 'mingw_perl_prefix',
'msvc_version', 'msvc_toolchain_env', 'mingw_toolchain_env',
'meson_cross_properties', 'manifest', 'extra_properties',
'qt5_qmake_path', 'qt5_pkgconfigdir', 'for_shell',
'package_tarball_compression', 'extra_mirrors',
'extra_bootstrap_packages', 'moltenvk_prefix']
cookbook = None
def __init__(self):
self._check_uninstalled()
self.python_exe = Path(sys.executable).as_posix()
for a in self._properties:
setattr(self, a, None)
self.arch_config = {self.target_arch: self}
# Store raw os.environ data
self._pre_environ = os.environ.copy()
self.config_env = os.environ.copy()
def _copy(self, arch):
c = copy.deepcopy(self)
c.target_arch = arch
return c
def _is_env_multipath_key(self, key):
return key in ('LD_LIBRARY_PATH', 'PATH', 'MANPATH', 'INFOPATH',
'PKG_CONFIG_PATH', 'PKG_CONFIG_LIBDIR', 'GI_TYPELIB_PATH',
'XDG_DATA_DIRS', 'XDG_CONFIG_DIRS', 'GST_PLUGIN_PATH',
'GST_PLUGIN_PATH_1_0', 'PYTHONPATH', 'MONO_PATH')
def load(self, filename=None, variants_override=None):
if variants_override is None:
variants_override = []
# First load the default configuration
self.load_defaults()
# Next parse the main configuration file
self._load_main_config()
# Ensure that Cerbero config files know about these variants, and that
# they override the values from the user configuration file above
self.variants += variants_override
# Next, if a config file is provided use it to override the settings
# from the main configuration file
self._load_cmd_config(filename)
# Create a copy of the config for each architecture in case we are
# building Universal binaries
if self.target_arch == Architecture.UNIVERSAL:
arch_config = {}
if isinstance(self.universal_archs, list):
# Simple list of architectures, just duplicate all the config
for arch in self.universal_archs:
arch_config[arch] = self._copy(arch)
elif isinstance(self.universal_archs, dict):
# Map of architectures to the corresponding config file. We
# do this so that we don't need to duplicate arch specific
# config again in the universal config.
for arch, config_file in list(self.universal_archs.items()):
arch_config[arch] = self._copy(arch)
# Allow the config to detect whether this config is
# running under a universal setup and some
# paths/configuration need to change
arch_config[arch].variants += ['universal']
if config_file is not None:
# This works because the override config files are
# fairly light. Things break if they are more complex
# as load config can have side effects in global state
d = os.path.dirname(filename[0])
for f in filename:
if 'universal' in f:
d = os.path.dirname(f)
arch_config[arch]._load_cmd_config([os.path.join(d, config_file)])
else:
raise ConfigurationError('universal_archs must be a list or a dict')
self.arch_config = arch_config
# Finally fill the missing gaps in the config
self._load_last_defaults()
self._load_platform_config()
# And validate properties
self._validate_properties()
for config in list(self.arch_config.values()):
if self.target_arch == Architecture.UNIVERSAL:
config.sources = os.path.join(self.sources, config.target_arch)
config.prefix = os.path.join(self.prefix)
# qmake_path is different for each arch in android-universal, but
# not in ios-universal.
qtpkgdir, qmake5 = detect_qt5(config.target_platform, config.target_arch,
self.target_arch == Architecture.UNIVERSAL)
config.set_property('qt5_qmake_path', qmake5)
config.set_property('qt5_pkgconfigdir', qtpkgdir)
config._load_platform_config()
config._load_last_defaults()
config._validate_properties()
# Ensure that variants continue to override all other configuration
self.variants += variants_override
# Build variants before copying any config
self.variants = Variants(self.variants)
if not self.prefix_is_executable() and self.variants.gi:
m.warning(_("gobject introspection requires an executable "
"prefix, 'gi' variant will be removed"))
self.variants.gi = False
for c in list(self.arch_config.values()):
c.variants = self.variants
self.do_setup_env()
if self.variants.visualstudio and self.msvc_version is not None:
m.message('Building recipes with Visual Studio {} whenever possible'
.format(get_vs_version(self.msvc_version)))
# Store current os.environ data
for c in list(self.arch_config.values()):
self._create_path(c.local_sources)
self._create_path(c.sources)
self._create_path(c.logs)
def do_setup_env(self):
self._create_path(self.prefix)
self._create_path(os.path.join(self.prefix, 'share', 'aclocal'))
self._create_path(os.path.join(
self.build_tools_prefix, 'share', 'aclocal'))
libdir = os.path.join(self.prefix, 'lib%s' % self.lib_suffix)
self.libdir = libdir
self.env = self.get_env(self.prefix, libdir, self.py_prefix)
@lru_cache(maxsize=None)
def get_env(self, prefix, libdir, py_prefix):
# Get paths for environment variables
includedir = os.path.join(prefix, 'include')
bindir = os.path.join(prefix, 'bin')
manpathdir = os.path.join(prefix, 'share', 'man')
infopathdir = os.path.join(prefix, 'share', 'info')
pkgconfigbin = os.path.join(self.build_tools_prefix, 'bin', 'pkg-config')
pkgconfigdatadir = os.path.join(prefix, 'share', 'pkgconfig')
pkgconfigdir = os.path.join(libdir, 'pkgconfig')
typelibpath = os.path.join(libdir, 'girepository-1.0')
xdgdatadir = os.path.join(prefix, 'share')
xdgconfigdir = os.path.join(prefix, 'etc', 'xdg')
xcursordir = os.path.join(prefix, 'share', 'icons')
aclocaldir = os.path.join(prefix, 'share', 'aclocal')
perlversionpath = os.path.join(libdir, 'perl5', 'site_perl',
self._perl_version())
if self.target_platform == Platform.WINDOWS:
# On windows even if perl version is 5.8.8, modules can be
# installed in 5.8
perlversionpath = perlversionpath.rsplit('.', 1)[0]
perl5lib = ':'.join(
[to_unixpath(os.path.join(libdir, 'perl5')),
to_unixpath(perlversionpath)])
gstpluginpath = os.path.join(libdir, 'gstreamer-0.10')
gstpluginpath10 = os.path.join(libdir, 'gstreamer-1.0')
gstregistry = os.path.join('~', '.gstreamer-0.10',
'cerbero-registry-%s' % self.target_arch)
gstregistry10 = os.path.join('~', '.cache', 'gstreamer-1.0',
'cerbero-registry-%s' % self.target_arch)
gstregistry = os.path.expanduser(gstregistry)
gstregistry10 = os.path.expanduser(gstregistry10)
pypath = sysconfig.get_path('purelib', vars={'base': ''})
# Must strip \/ to ensure that the path is relative
pypath = PurePath(pypath.strip('\\/'))
# Starting with Python 3.7.1 on Windows, each PYTHONPATH must use the
# native path separator and must end in a path separator.
pythonpath = [str(prefix / pypath) + os.sep,
str(self.build_tools_prefix / pypath) + os.sep]
if self.platform == Platform.WINDOWS:
# On Windows, pypath doesn't include Python version although some
# packages (pycairo, gi, etc...) install themselves using Python
# version scheme like on a posix system.
# Let's add an extra path to PYTHONPATH for these libraries.
pypath = sysconfig.get_path('purelib', 'posix_prefix', {'base': ''})
pypath = PurePath(pypath.strip('\\/'))
pythonpath.append(str(prefix / pypath) + os.sep)
# Ensure python paths exists because setup.py won't create them
for path in pythonpath:
if self.platform == Platform.WINDOWS:
# pythonpaths start with 'Lib' on Windows, which is extremely
# undesirable since our libdir is 'lib'. Windows APIs are
# case-preserving case-insensitive.
path = path.lower()
self._create_path(path)
pythonpath = os.pathsep.join(pythonpath)
if self.platform == Platform.LINUX:
xdgdatadir += ":/usr/share:/usr/local/share"
ldflags = self.config_env.get('LDFLAGS', '')
ldflags_libdir = '-L%s ' % libdir
if ldflags_libdir not in ldflags:
# Ensure there's no leading whitespace in LDFLAGS
if ldflags:
ldflags += ' ' + ldflags_libdir
else:
ldflags = ldflags_libdir
path = self.config_env.get('PATH', None)
path = self._join_path(
os.path.join(self.build_tools_prefix, 'bin'), path)
# Add the prefix bindir after the build-tools bindir so that on Windows
# binaries are run with the same libraries that they are linked with.
if bindir not in path and self.prefix_is_executable():
path = self._join_path(bindir, path)
ld_library_path = self._join_path(
os.path.join(self.build_tools_prefix, 'lib'), path)
if not self.cross_compiling():
ld_library_path = self._join_path(ld_library_path, libdir)
if self.extra_lib_path is not None:
ld_library_path = self._join_path(ld_library_path, self.extra_lib_path)
if self.toolchain_prefix is not None:
ld_library_path = self._join_path(ld_library_path,
os.path.join(self.toolchain_prefix, 'lib'))
includedir = self._join_path(includedir,
os.path.join(self.toolchain_prefix, 'include'))
# Most of these variables are extracted from jhbuild
env = {'LD_LIBRARY_PATH': ld_library_path,
'LDFLAGS': ldflags,
'C_INCLUDE_PATH': includedir,
'CPLUS_INCLUDE_PATH': includedir,
'PATH': path,
'MANPATH': manpathdir,
'INFOPATH': infopathdir,
'PKG_CONFIG': pkgconfigbin,
'PKG_CONFIG_PATH': '%s' % pkgconfigdatadir,
'PKG_CONFIG_LIBDIR': '%s' % pkgconfigdir,
'GI_TYPELIB_PATH': typelibpath,
'XDG_DATA_DIRS': xdgdatadir,
'XDG_CONFIG_DIRS': xdgconfigdir,
'XCURSOR_PATH': xcursordir,
'ACLOCAL_FLAGS': '-I%s' % aclocaldir,
'ACLOCAL': "aclocal",
'PERL5LIB': perl5lib,
'GST_PLUGIN_PATH': gstpluginpath,
'GST_PLUGIN_PATH_1_0': gstpluginpath10,
'GST_REGISTRY': gstregistry,
'GST_REGISTRY_1_0': gstregistry10,
'PYTHONPATH': pythonpath,
'MONO_PATH': os.path.join(libdir, 'mono', '4.5'),
'MONO_GAC_PREFIX': prefix,
'GSTREAMER_ROOT': prefix,
'CERBERO_PREFIX': self.prefix,
'CERBERO_HOST_SOURCES': self.sources
}
# merge the config env with this new env
new_env = {}
for k in env.keys():
if k not in self.config_env:
new_env[k] = env[k]
else:
env_v = env[k]
config_v = self.config_env[k]
if env_v == config_v:
new_env[k] = env_v
elif k in ('LDFLAGS', 'PATH'):
# handled above
new_env[k] = env_v
elif self._is_env_multipath_key(k):
new_env[k] = self._join_path(env_v, config_v)
else:
raise FatalError("Don't know how to combine the environment "
"variable '%s' with values '%s' and '%s'" % (k, env_v, config_v))
for k in self.config_env.keys():
if k not in env:
new_env[k] = self.config_env[k]
return new_env
def load_defaults(self):
self.set_property('cache_file', None)
self.set_property('home_dir', self._default_home_dir())
self.set_property('prefix', None)
self.set_property('sources', None)
self.set_property('local_sources', None)
self.set_property('cached_sources', self._relative_path('sources'))
self.set_property('git_root', DEFAULT_GIT_ROOT)
self.set_property('allow_parallel_build', DEFAULT_ALLOW_PARALLEL_BUILD)
self.set_property('host', None)
self.set_property('build', None)
self.set_property('target', None)
platform, arch, distro, distro_version, num_of_cpus = system_info()
self.set_property('platform', platform)
self.set_property('num_of_cpus', num_of_cpus)
self.set_property('target_platform', platform)
self.set_property('arch', arch)
self.set_property('target_arch', arch)
self.set_property('distro', distro)
self.set_property('target_distro', distro)
self.set_property('distro_version', distro_version)
self.set_property('target_distro_version', distro_version)
self.set_property('packages_prefix', None)
self.set_property('packager', DEFAULT_PACKAGER)
self.set_property('package_tarball_compression', 'bz2')
stdlibpath = sysconfig.get_path('stdlib', vars={'installed_base': ''})[1:]
# Ensure that the path uses / as path separator and not \
self.set_property('py_prefix', PurePath(stdlibpath).as_posix())
self.set_property('lib_suffix', '')
self.set_property('data_dir', self._find_data_dir())
self.set_property('environ_dir', self._relative_path('config'))
self.set_property('recipes_dir', self._relative_path('recipes'))
self.set_property('packages_dir', self._relative_path('packages'))
self.set_property('allow_system_libs', True)
self.set_property('use_configure_cache', False)
self.set_property('external_recipes', {})
self.set_property('external_packages', {})
self.set_property('universal_archs', None)
self.set_property('variants', [])
self.set_property('build_tools_prefix', None)
self.set_property('build_tools_sources', None)
self.set_property('build_tools_cache', None)
self.set_property('recipes_commits', {})
self.set_property('recipes_remotes', {})
self.set_property('extra_build_tools', [])
self.set_property('distro_packages_install', True)
self.set_property('interactive', m.console_is_interactive())
self.set_property('meson_cross_properties', {})
self.set_property('manifest', None)
self.set_property('extra_properties', {})
self.set_property('extra_mirrors', [])
self.set_property('extra_bootstrap_packages', {})
self.set_property('bash_completions', set())
# Increase open-files limits
set_nofile_ulimit()
def set_property(self, name, value, force=False):
if name not in self._properties:
raise ConfigurationError('Unknown key %s' % name)
if force or getattr(self, name) is None:
setattr(self, name, value)
def get_recipes_repos(self):
recipes_dir = {'default': (self.recipes_dir, 0)}
for name, (path, priority) in self.external_recipes.items():
path = os.path.abspath(os.path.expanduser(path))
recipes_dir[name] = (path, priority)
return recipes_dir
def get_packages_repos(self):
packages_dir = {'default': (self.packages_dir, 0)}
for name, (path, priority) in self.external_packages.items():
path = os.path.abspath(os.path.expanduser(path))
packages_dir[name] = (path, priority)
return packages_dir
def recipe_commit(self, recipe_name):
if self.force_git_commit:
return self.force_git_commit
if recipe_name in self.recipes_commits:
return self.recipes_commits[recipe_name]
return None
def cross_compiling(self):
"Are we building for the host platform or not?"
# On Windows, building 32-bit on 64-bit is not cross-compilation since
# 32-bit Windows binaries run on 64-bit Windows via WOW64.
if self.platform == Platform.WINDOWS:
if self.arch == Architecture.X86_64 and \
self.target_arch == Architecture.X86:
return False
return self.target_platform != self.platform or \
self.target_arch != self.arch or \
self.target_distro_version != self.distro_version
def cross_universal_type(self):
if not self.cross_compiling():
return None
# cross-ios-universal, each arch prefix is merged and flattened into one prefix
if isinstance(self.universal_archs, list):
return 'flat'
# cross-android-universal, each arch prefix is separate
if isinstance(self.universal_archs, dict):
return 'normal'
return None
def prefix_is_executable(self):
"""Can the binaries from the target platform can be executed in the
build env?"""
if self.target_platform != self.platform:
return False
if self.target_arch != self.arch:
if self.target_arch == Architecture.X86 and \
self.arch == Architecture.X86_64:
return True
return False
return True
def prefix_is_build_tools(self):
return self.build_tools_prefix == self.prefix
def target_distro_version_gte(self, distro_version):
assert distro_version.startswith(self.target_distro + "_")
return self.target_distro_version >= distro_version
def _parse(self, filename, reset=True):
config = {'os': os, '__file__': filename, 'env' : self.config_env}
if not reset:
for prop in self._properties:
if hasattr(self, prop):
config[prop] = getattr(self, prop)
try:
parse_file(filename, config)
except:
raise ConfigurationError(_('Could not include config file (%s)') %
filename)
for key in self._properties:
if key in config:
self.set_property(key, config[key], True)
def _validate_properties(self):
if not validate_packager(self.packager):
raise FatalError(_('packager "%s" must be in the format '
'"Name <email>"') % self.packager)
def _check_uninstalled(self):
self.uninstalled = int(os.environ.get(CERBERO_UNINSTALLED, 0)) == 1
def _create_path(self, path):
if not os.path.exists(path):
try:
os.makedirs(path)
except:
raise FatalError(_('directory (%s) can not be created') % path)
def _join_path(self, path1, path2):
if len(path1) == 0:
return path2
if len(path2) == 0:
return path1
if self.platform == Platform.WINDOWS:
separator = ';'
else:
separator = ':'
return "%s%s%s" % (path1, separator, path2)
def _load_main_config(self):
if os.path.exists(DEFAULT_CONFIG_FILE):
m.message('Loading default configuration from {}'.format(DEFAULT_CONFIG_FILE))
self._parse(DEFAULT_CONFIG_FILE)
def _load_cmd_config(self, filenames):
if filenames is not None:
for f in filenames:
if not os.path.exists(f):
f = os.path.join(CONFIG_DIR, f + "." + CONFIG_EXT)
if os.path.exists(f):
self._parse(f, reset=False)
else:
raise ConfigurationError(_("Configuration file %s doesn't "
"exist") % f)
def _load_platform_config(self):
platform_config = os.path.join(self.environ_dir, '%s.config' %
self.target_platform)
arch_config = os.path.join(self.environ_dir, '%s_%s.config' %
(self.target_platform, self.target_arch))
for config_path in [platform_config, arch_config]:
if os.path.exists(config_path):
self._parse(config_path, reset=False)
def _load_last_defaults(self):
target_platform = self.target_platform
if target_platform == Platform.WINDOWS and 'visualstudio' in self.variants:
target_platform = 'msvc'
self.set_property('prefix', os.path.join(self.home_dir, "dist",
"%s_%s" % (target_platform, self.target_arch)))
self.set_property('sources', os.path.join(self.home_dir, "sources",
"%s_%s" % (target_platform, self.target_arch)))
self.set_property('logs', os.path.join(self.home_dir, "logs",
"%s_%s" % (target_platform, self.target_arch)))
self.set_property('cache_file',
"%s_%s.cache" % (target_platform, self.target_arch))
self.set_property('install_dir', self.prefix)
self.set_property('local_sources', self._default_local_sources_dir())
self.set_property('build_tools_prefix',
os.path.join(self.home_dir, 'build-tools'))
self.set_property('build_tools_sources',
os.path.join(self.home_dir, 'sources', 'build-tools'))
self.set_property('build_tools_cache', 'build-tools.cache')
def _find_data_dir(self):
if self.uninstalled:
self.data_dir = os.path.join(os.path.dirname(__file__),
'..', 'data')
self.data_dir = os.path.abspath(self.data_dir)
return
curdir = os.path.dirname(__file__)
while not os.path.exists(os.path.join(curdir, 'share', 'cerbero',
'config')):
curdir = os.path.abspath(os.path.join(curdir, '..'))
if curdir == '/' or curdir[1:] == ':/':
# We reached the root without finding the data dir, which
# shouldn't happen
raise FatalError("Data dir not found")
self.data_dir = os.path.join(curdir, 'share', 'cerbero')
def _relative_path(self, path):
if not self.uninstalled:
p = os.path.join(self.data_dir, path)
else:
p = os.path.join(os.path.dirname(__file__), '..', path)
return os.path.abspath(p)
def _default_home_dir(self):
if self.uninstalled:
p = os.path.join(os.path.dirname(__file__), '..', 'build')
else:
p = os.path.expanduser('~/cerbero')
return os.path.abspath(p)
def _default_local_sources_dir(self):
# For backwards-compatibility, keep the old value for setups that
# define their own home_dir inside which all cerbero work must be
# contained; f.ex. ci.gstreamer.net
if self.home_dir != self._default_home_dir():
return os.path.join(self.home_dir, 'sources', 'local')
# Default value should be in a user-specific location so that it can
# be shared across all cerbero directories and invocations
if self.platform == Platform.WINDOWS and 'USERPROFILE' in os.environ:
cache_dir = Path(os.environ['USERPROFILE']) / '.cache'
elif 'XDG_CACHE_HOME' in os.environ:
cache_dir = Path(os.environ['XDG_CACHE_HOME'])
else:
# Path.home() reads the HOME env var
cache_dir = Path.home() / '.cache'
return (cache_dir / 'cerbero-sources').as_posix()
@lru_cache()
def _perl_version(self):
try:
version = shell.check_output("perl -e 'print \"$]\";'")
except FatalError:
m.warning(_("Perl not found, you may need to run bootstrap."))
version = '0.000000'
# FIXME: when perl's mayor is >= 10
mayor = str(version[0])
minor = str(int(version[2:5]))
revision = str(int(version[5:8]))
return '.'.join([mayor, minor, revision])
Prepend $CERBERO_PREFIX/lib path in LD_LIBRARY_PATH
When not cross-compiling, $CERBERO_PREFIX/lib path was added at the end
of LD_LIBRARY_PATH whereas build-tools/lib path was prepended. As a
consequence, libraries and tools installed during bootstrap were taking
precedence over the one built later within the prefix (for libraries and
tools available at both places). This happens for example with glib when
it is installed during bootstrap (glib-tools.recipe) and built later as
a dependency (glib.recipe). This commit prepends $CERBERO_PREFIX/lib in
LD_LIBRARY_PATH in order to use the build-tools/lib when no dependency
is custom built, and then to use the $CERBERO_PREFIX/lib once the
dependency has been correctly built. It also brings coherency with PATH
variable order as $CERBERO_PREFIX/bin preceeds build-tools/bin.
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import sys
import copy
import sysconfig
import itertools
from functools import lru_cache
from pathlib import PurePath, Path
from cerbero import enums
from cerbero.errors import FatalError, ConfigurationError
from cerbero.utils import _, system_info, validate_packager, to_unixpath,\
shell, parse_file, detect_qt5
from cerbero.utils import messages as m
from cerbero.ide.vs.env import get_vs_version
CONFIG_DIR = os.path.expanduser('~/.cerbero')
CONFIG_EXT = 'cbc'
DEFAULT_CONFIG_FILENAME = 'cerbero.%s' % CONFIG_EXT
DEFAULT_CONFIG_FILE = os.path.join(CONFIG_DIR, DEFAULT_CONFIG_FILENAME)
DEFAULT_GIT_ROOT = 'https://gitlab.freedesktop.org/gstreamer'
DEFAULT_ALLOW_PARALLEL_BUILD = True
DEFAULT_PACKAGER = "Default <default@change.me>"
CERBERO_UNINSTALLED = 'CERBERO_UNINSTALLED'
DEFAULT_MIRRORS = ['https://gstreamer.freedesktop.org/src/mirror/']
Platform = enums.Platform
Architecture = enums.Architecture
Distro = enums.Distro
DistroVersion = enums.DistroVersion
License = enums.License
LibraryType = enums.LibraryType
def set_nofile_ulimit():
'''
Some newer toolchains such as our GCC 8.2 cross toolchain exceed the
1024 file ulimit, so let's increase it.
See: https://gitlab.freedesktop.org/gstreamer/cerbero/issues/165
'''
try:
import resource
except ImportError:
return
want = 2048
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < want or hard < want:
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (want, want))
except (OSError, ValueError):
print('Failed to increase file ulimit, you may see linker failures')
class Variants(object):
__disabled_variants = ['x11', 'alsa', 'pulse', 'cdparanoia', 'v4l2',
'gi', 'unwind', 'rpi', 'visualstudio', 'qt5',
'intelmsdk', 'nvcodec', 'python', 'werror', 'vaapi']
__enabled_variants = ['debug', 'testspackage']
__all_variants = __enabled_variants + __disabled_variants
def __init__(self, variants):
for v in self.__enabled_variants:
setattr(self, v, True)
for v in self.__disabled_variants:
setattr(self, v, False)
for v in variants:
if v.startswith('no'):
if v[2:] not in self.__all_variants:
m.warning('Variant {} is unknown or obsolete'.format(v[2:]))
setattr(self, v[2:], False)
else:
if v not in self.__all_variants:
m.warning('Variant {} is unknown or obsolete'.format(v))
setattr(self, v, True)
def __getattr__(self, name):
try:
if name.startswith('no'):
return not object.__getattribute__(self, name[2:])
else:
return object.__getattribute__(self, name)
except Exception:
raise AttributeError("%s is not a known variant" % name)
def __repr__(self):
return '<Variants: {}>'.format(self.__dict__)
def all(self):
return sorted(self.__all_variants)
class Config (object):
_properties = ['platform', 'target_platform', 'arch', 'target_arch',
'prefix', 'recipes_dir', 'host', 'build', 'target',
'sources', 'local_sources', 'lib_suffix', 'git_root',
'distro', 'target_distro', 'environ_dir', 'cache_file',
'toolchain_prefix', 'toolchain_version', 'distro_version',
'target_distro_version', 'allow_system_libs',
'packages_dir', 'py_prefix', 'logs',
'install_dir', 'allow_parallel_build', 'num_of_cpus',
'use_configure_cache', 'packages_prefix', 'packager',
'data_dir', 'min_osx_sdk_version', 'external_recipes',
'external_packages', 'use_ccache', 'force_git_commit',
'universal_archs', 'osx_target_sdk_version', 'variants',
'build_tools_prefix', 'build_tools_sources',
'build_tools_cache', 'home_dir', 'recipes_commits',
'recipes_remotes', 'ios_platform', 'extra_build_tools',
'distro_packages_install', 'interactive', 'bash_completions',
'target_arch_flags', 'sysroot', 'isysroot',
'extra_lib_path', 'cached_sources', 'tools_prefix',
'ios_min_version', 'toolchain_path', 'mingw_perl_prefix',
'msvc_version', 'msvc_toolchain_env', 'mingw_toolchain_env',
'meson_cross_properties', 'manifest', 'extra_properties',
'qt5_qmake_path', 'qt5_pkgconfigdir', 'for_shell',
'package_tarball_compression', 'extra_mirrors',
'extra_bootstrap_packages', 'moltenvk_prefix']
cookbook = None
def __init__(self):
self._check_uninstalled()
self.python_exe = Path(sys.executable).as_posix()
for a in self._properties:
setattr(self, a, None)
self.arch_config = {self.target_arch: self}
# Store raw os.environ data
self._pre_environ = os.environ.copy()
self.config_env = os.environ.copy()
def _copy(self, arch):
c = copy.deepcopy(self)
c.target_arch = arch
return c
def _is_env_multipath_key(self, key):
return key in ('LD_LIBRARY_PATH', 'PATH', 'MANPATH', 'INFOPATH',
'PKG_CONFIG_PATH', 'PKG_CONFIG_LIBDIR', 'GI_TYPELIB_PATH',
'XDG_DATA_DIRS', 'XDG_CONFIG_DIRS', 'GST_PLUGIN_PATH',
'GST_PLUGIN_PATH_1_0', 'PYTHONPATH', 'MONO_PATH')
def load(self, filename=None, variants_override=None):
if variants_override is None:
variants_override = []
# First load the default configuration
self.load_defaults()
# Next parse the main configuration file
self._load_main_config()
# Ensure that Cerbero config files know about these variants, and that
# they override the values from the user configuration file above
self.variants += variants_override
# Next, if a config file is provided use it to override the settings
# from the main configuration file
self._load_cmd_config(filename)
# Create a copy of the config for each architecture in case we are
# building Universal binaries
if self.target_arch == Architecture.UNIVERSAL:
arch_config = {}
if isinstance(self.universal_archs, list):
# Simple list of architectures, just duplicate all the config
for arch in self.universal_archs:
arch_config[arch] = self._copy(arch)
elif isinstance(self.universal_archs, dict):
# Map of architectures to the corresponding config file. We
# do this so that we don't need to duplicate arch specific
# config again in the universal config.
for arch, config_file in list(self.universal_archs.items()):
arch_config[arch] = self._copy(arch)
# Allow the config to detect whether this config is
# running under a universal setup and some
# paths/configuration need to change
arch_config[arch].variants += ['universal']
if config_file is not None:
# This works because the override config files are
# fairly light. Things break if they are more complex
# as load config can have side effects in global state
d = os.path.dirname(filename[0])
for f in filename:
if 'universal' in f:
d = os.path.dirname(f)
arch_config[arch]._load_cmd_config([os.path.join(d, config_file)])
else:
raise ConfigurationError('universal_archs must be a list or a dict')
self.arch_config = arch_config
# Finally fill the missing gaps in the config
self._load_last_defaults()
self._load_platform_config()
# And validate properties
self._validate_properties()
for config in list(self.arch_config.values()):
if self.target_arch == Architecture.UNIVERSAL:
config.sources = os.path.join(self.sources, config.target_arch)
config.prefix = os.path.join(self.prefix)
# qmake_path is different for each arch in android-universal, but
# not in ios-universal.
qtpkgdir, qmake5 = detect_qt5(config.target_platform, config.target_arch,
self.target_arch == Architecture.UNIVERSAL)
config.set_property('qt5_qmake_path', qmake5)
config.set_property('qt5_pkgconfigdir', qtpkgdir)
config._load_platform_config()
config._load_last_defaults()
config._validate_properties()
# Ensure that variants continue to override all other configuration
self.variants += variants_override
# Build variants before copying any config
self.variants = Variants(self.variants)
if not self.prefix_is_executable() and self.variants.gi:
m.warning(_("gobject introspection requires an executable "
"prefix, 'gi' variant will be removed"))
self.variants.gi = False
for c in list(self.arch_config.values()):
c.variants = self.variants
self.do_setup_env()
if self.variants.visualstudio and self.msvc_version is not None:
m.message('Building recipes with Visual Studio {} whenever possible'
.format(get_vs_version(self.msvc_version)))
# Store current os.environ data
for c in list(self.arch_config.values()):
self._create_path(c.local_sources)
self._create_path(c.sources)
self._create_path(c.logs)
def do_setup_env(self):
self._create_path(self.prefix)
self._create_path(os.path.join(self.prefix, 'share', 'aclocal'))
self._create_path(os.path.join(
self.build_tools_prefix, 'share', 'aclocal'))
libdir = os.path.join(self.prefix, 'lib%s' % self.lib_suffix)
self.libdir = libdir
self.env = self.get_env(self.prefix, libdir, self.py_prefix)
@lru_cache(maxsize=None)
def get_env(self, prefix, libdir, py_prefix):
# Get paths for environment variables
includedir = os.path.join(prefix, 'include')
bindir = os.path.join(prefix, 'bin')
manpathdir = os.path.join(prefix, 'share', 'man')
infopathdir = os.path.join(prefix, 'share', 'info')
pkgconfigbin = os.path.join(self.build_tools_prefix, 'bin', 'pkg-config')
pkgconfigdatadir = os.path.join(prefix, 'share', 'pkgconfig')
pkgconfigdir = os.path.join(libdir, 'pkgconfig')
typelibpath = os.path.join(libdir, 'girepository-1.0')
xdgdatadir = os.path.join(prefix, 'share')
xdgconfigdir = os.path.join(prefix, 'etc', 'xdg')
xcursordir = os.path.join(prefix, 'share', 'icons')
aclocaldir = os.path.join(prefix, 'share', 'aclocal')
perlversionpath = os.path.join(libdir, 'perl5', 'site_perl',
self._perl_version())
if self.target_platform == Platform.WINDOWS:
# On windows even if perl version is 5.8.8, modules can be
# installed in 5.8
perlversionpath = perlversionpath.rsplit('.', 1)[0]
perl5lib = ':'.join(
[to_unixpath(os.path.join(libdir, 'perl5')),
to_unixpath(perlversionpath)])
gstpluginpath = os.path.join(libdir, 'gstreamer-0.10')
gstpluginpath10 = os.path.join(libdir, 'gstreamer-1.0')
gstregistry = os.path.join('~', '.gstreamer-0.10',
'cerbero-registry-%s' % self.target_arch)
gstregistry10 = os.path.join('~', '.cache', 'gstreamer-1.0',
'cerbero-registry-%s' % self.target_arch)
gstregistry = os.path.expanduser(gstregistry)
gstregistry10 = os.path.expanduser(gstregistry10)
pypath = sysconfig.get_path('purelib', vars={'base': ''})
# Must strip \/ to ensure that the path is relative
pypath = PurePath(pypath.strip('\\/'))
# Starting with Python 3.7.1 on Windows, each PYTHONPATH must use the
# native path separator and must end in a path separator.
pythonpath = [str(prefix / pypath) + os.sep,
str(self.build_tools_prefix / pypath) + os.sep]
if self.platform == Platform.WINDOWS:
# On Windows, pypath doesn't include Python version although some
# packages (pycairo, gi, etc...) install themselves using Python
# version scheme like on a posix system.
# Let's add an extra path to PYTHONPATH for these libraries.
pypath = sysconfig.get_path('purelib', 'posix_prefix', {'base': ''})
pypath = PurePath(pypath.strip('\\/'))
pythonpath.append(str(prefix / pypath) + os.sep)
# Ensure python paths exists because setup.py won't create them
for path in pythonpath:
if self.platform == Platform.WINDOWS:
# pythonpaths start with 'Lib' on Windows, which is extremely
# undesirable since our libdir is 'lib'. Windows APIs are
# case-preserving case-insensitive.
path = path.lower()
self._create_path(path)
pythonpath = os.pathsep.join(pythonpath)
if self.platform == Platform.LINUX:
xdgdatadir += ":/usr/share:/usr/local/share"
ldflags = self.config_env.get('LDFLAGS', '')
ldflags_libdir = '-L%s ' % libdir
if ldflags_libdir not in ldflags:
# Ensure there's no leading whitespace in LDFLAGS
if ldflags:
ldflags += ' ' + ldflags_libdir
else:
ldflags = ldflags_libdir
path = self.config_env.get('PATH', None)
path = self._join_path(
os.path.join(self.build_tools_prefix, 'bin'), path)
# Add the prefix bindir after the build-tools bindir so that on Windows
# binaries are run with the same libraries that they are linked with.
if bindir not in path and self.prefix_is_executable():
path = self._join_path(bindir, path)
ld_library_path = self._join_path(
os.path.join(self.build_tools_prefix, 'lib'), path)
if not self.cross_compiling():
ld_library_path = self._join_path(libdir, ld_library_path)
if self.extra_lib_path is not None:
ld_library_path = self._join_path(ld_library_path, self.extra_lib_path)
if self.toolchain_prefix is not None:
ld_library_path = self._join_path(ld_library_path,
os.path.join(self.toolchain_prefix, 'lib'))
includedir = self._join_path(includedir,
os.path.join(self.toolchain_prefix, 'include'))
# Most of these variables are extracted from jhbuild
env = {'LD_LIBRARY_PATH': ld_library_path,
'LDFLAGS': ldflags,
'C_INCLUDE_PATH': includedir,
'CPLUS_INCLUDE_PATH': includedir,
'PATH': path,
'MANPATH': manpathdir,
'INFOPATH': infopathdir,
'PKG_CONFIG': pkgconfigbin,
'PKG_CONFIG_PATH': '%s' % pkgconfigdatadir,
'PKG_CONFIG_LIBDIR': '%s' % pkgconfigdir,
'GI_TYPELIB_PATH': typelibpath,
'XDG_DATA_DIRS': xdgdatadir,
'XDG_CONFIG_DIRS': xdgconfigdir,
'XCURSOR_PATH': xcursordir,
'ACLOCAL_FLAGS': '-I%s' % aclocaldir,
'ACLOCAL': "aclocal",
'PERL5LIB': perl5lib,
'GST_PLUGIN_PATH': gstpluginpath,
'GST_PLUGIN_PATH_1_0': gstpluginpath10,
'GST_REGISTRY': gstregistry,
'GST_REGISTRY_1_0': gstregistry10,
'PYTHONPATH': pythonpath,
'MONO_PATH': os.path.join(libdir, 'mono', '4.5'),
'MONO_GAC_PREFIX': prefix,
'GSTREAMER_ROOT': prefix,
'CERBERO_PREFIX': self.prefix,
'CERBERO_HOST_SOURCES': self.sources
}
# merge the config env with this new env
new_env = {}
for k in env.keys():
if k not in self.config_env:
new_env[k] = env[k]
else:
env_v = env[k]
config_v = self.config_env[k]
if env_v == config_v:
new_env[k] = env_v
elif k in ('LDFLAGS', 'PATH'):
# handled above
new_env[k] = env_v
elif self._is_env_multipath_key(k):
new_env[k] = self._join_path(env_v, config_v)
else:
raise FatalError("Don't know how to combine the environment "
"variable '%s' with values '%s' and '%s'" % (k, env_v, config_v))
for k in self.config_env.keys():
if k not in env:
new_env[k] = self.config_env[k]
return new_env
def load_defaults(self):
self.set_property('cache_file', None)
self.set_property('home_dir', self._default_home_dir())
self.set_property('prefix', None)
self.set_property('sources', None)
self.set_property('local_sources', None)
self.set_property('cached_sources', self._relative_path('sources'))
self.set_property('git_root', DEFAULT_GIT_ROOT)
self.set_property('allow_parallel_build', DEFAULT_ALLOW_PARALLEL_BUILD)
self.set_property('host', None)
self.set_property('build', None)
self.set_property('target', None)
platform, arch, distro, distro_version, num_of_cpus = system_info()
self.set_property('platform', platform)
self.set_property('num_of_cpus', num_of_cpus)
self.set_property('target_platform', platform)
self.set_property('arch', arch)
self.set_property('target_arch', arch)
self.set_property('distro', distro)
self.set_property('target_distro', distro)
self.set_property('distro_version', distro_version)
self.set_property('target_distro_version', distro_version)
self.set_property('packages_prefix', None)
self.set_property('packager', DEFAULT_PACKAGER)
self.set_property('package_tarball_compression', 'bz2')
stdlibpath = sysconfig.get_path('stdlib', vars={'installed_base': ''})[1:]
# Ensure that the path uses / as path separator and not \
self.set_property('py_prefix', PurePath(stdlibpath).as_posix())
self.set_property('lib_suffix', '')
self.set_property('data_dir', self._find_data_dir())
self.set_property('environ_dir', self._relative_path('config'))
self.set_property('recipes_dir', self._relative_path('recipes'))
self.set_property('packages_dir', self._relative_path('packages'))
self.set_property('allow_system_libs', True)
self.set_property('use_configure_cache', False)
self.set_property('external_recipes', {})
self.set_property('external_packages', {})
self.set_property('universal_archs', None)
self.set_property('variants', [])
self.set_property('build_tools_prefix', None)
self.set_property('build_tools_sources', None)
self.set_property('build_tools_cache', None)
self.set_property('recipes_commits', {})
self.set_property('recipes_remotes', {})
self.set_property('extra_build_tools', [])
self.set_property('distro_packages_install', True)
self.set_property('interactive', m.console_is_interactive())
self.set_property('meson_cross_properties', {})
self.set_property('manifest', None)
self.set_property('extra_properties', {})
self.set_property('extra_mirrors', [])
self.set_property('extra_bootstrap_packages', {})
self.set_property('bash_completions', set())
# Increase open-files limits
set_nofile_ulimit()
def set_property(self, name, value, force=False):
if name not in self._properties:
raise ConfigurationError('Unknown key %s' % name)
if force or getattr(self, name) is None:
setattr(self, name, value)
def get_recipes_repos(self):
recipes_dir = {'default': (self.recipes_dir, 0)}
for name, (path, priority) in self.external_recipes.items():
path = os.path.abspath(os.path.expanduser(path))
recipes_dir[name] = (path, priority)
return recipes_dir
def get_packages_repos(self):
packages_dir = {'default': (self.packages_dir, 0)}
for name, (path, priority) in self.external_packages.items():
path = os.path.abspath(os.path.expanduser(path))
packages_dir[name] = (path, priority)
return packages_dir
def recipe_commit(self, recipe_name):
if self.force_git_commit:
return self.force_git_commit
if recipe_name in self.recipes_commits:
return self.recipes_commits[recipe_name]
return None
def cross_compiling(self):
"Are we building for the host platform or not?"
# On Windows, building 32-bit on 64-bit is not cross-compilation since
# 32-bit Windows binaries run on 64-bit Windows via WOW64.
if self.platform == Platform.WINDOWS:
if self.arch == Architecture.X86_64 and \
self.target_arch == Architecture.X86:
return False
return self.target_platform != self.platform or \
self.target_arch != self.arch or \
self.target_distro_version != self.distro_version
def cross_universal_type(self):
if not self.cross_compiling():
return None
# cross-ios-universal, each arch prefix is merged and flattened into one prefix
if isinstance(self.universal_archs, list):
return 'flat'
# cross-android-universal, each arch prefix is separate
if isinstance(self.universal_archs, dict):
return 'normal'
return None
def prefix_is_executable(self):
"""Can the binaries from the target platform can be executed in the
build env?"""
if self.target_platform != self.platform:
return False
if self.target_arch != self.arch:
if self.target_arch == Architecture.X86 and \
self.arch == Architecture.X86_64:
return True
return False
return True
def prefix_is_build_tools(self):
return self.build_tools_prefix == self.prefix
def target_distro_version_gte(self, distro_version):
assert distro_version.startswith(self.target_distro + "_")
return self.target_distro_version >= distro_version
def _parse(self, filename, reset=True):
config = {'os': os, '__file__': filename, 'env' : self.config_env}
if not reset:
for prop in self._properties:
if hasattr(self, prop):
config[prop] = getattr(self, prop)
try:
parse_file(filename, config)
except:
raise ConfigurationError(_('Could not include config file (%s)') %
filename)
for key in self._properties:
if key in config:
self.set_property(key, config[key], True)
def _validate_properties(self):
if not validate_packager(self.packager):
raise FatalError(_('packager "%s" must be in the format '
'"Name <email>"') % self.packager)
def _check_uninstalled(self):
self.uninstalled = int(os.environ.get(CERBERO_UNINSTALLED, 0)) == 1
def _create_path(self, path):
if not os.path.exists(path):
try:
os.makedirs(path)
except:
raise FatalError(_('directory (%s) can not be created') % path)
def _join_path(self, path1, path2):
if len(path1) == 0:
return path2
if len(path2) == 0:
return path1
if self.platform == Platform.WINDOWS:
separator = ';'
else:
separator = ':'
return "%s%s%s" % (path1, separator, path2)
def _load_main_config(self):
if os.path.exists(DEFAULT_CONFIG_FILE):
m.message('Loading default configuration from {}'.format(DEFAULT_CONFIG_FILE))
self._parse(DEFAULT_CONFIG_FILE)
def _load_cmd_config(self, filenames):
if filenames is not None:
for f in filenames:
if not os.path.exists(f):
f = os.path.join(CONFIG_DIR, f + "." + CONFIG_EXT)
if os.path.exists(f):
self._parse(f, reset=False)
else:
raise ConfigurationError(_("Configuration file %s doesn't "
"exist") % f)
def _load_platform_config(self):
platform_config = os.path.join(self.environ_dir, '%s.config' %
self.target_platform)
arch_config = os.path.join(self.environ_dir, '%s_%s.config' %
(self.target_platform, self.target_arch))
for config_path in [platform_config, arch_config]:
if os.path.exists(config_path):
self._parse(config_path, reset=False)
def _load_last_defaults(self):
target_platform = self.target_platform
if target_platform == Platform.WINDOWS and 'visualstudio' in self.variants:
target_platform = 'msvc'
self.set_property('prefix', os.path.join(self.home_dir, "dist",
"%s_%s" % (target_platform, self.target_arch)))
self.set_property('sources', os.path.join(self.home_dir, "sources",
"%s_%s" % (target_platform, self.target_arch)))
self.set_property('logs', os.path.join(self.home_dir, "logs",
"%s_%s" % (target_platform, self.target_arch)))
self.set_property('cache_file',
"%s_%s.cache" % (target_platform, self.target_arch))
self.set_property('install_dir', self.prefix)
self.set_property('local_sources', self._default_local_sources_dir())
self.set_property('build_tools_prefix',
os.path.join(self.home_dir, 'build-tools'))
self.set_property('build_tools_sources',
os.path.join(self.home_dir, 'sources', 'build-tools'))
self.set_property('build_tools_cache', 'build-tools.cache')
def _find_data_dir(self):
if self.uninstalled:
self.data_dir = os.path.join(os.path.dirname(__file__),
'..', 'data')
self.data_dir = os.path.abspath(self.data_dir)
return
curdir = os.path.dirname(__file__)
while not os.path.exists(os.path.join(curdir, 'share', 'cerbero',
'config')):
curdir = os.path.abspath(os.path.join(curdir, '..'))
if curdir == '/' or curdir[1:] == ':/':
# We reached the root without finding the data dir, which
# shouldn't happen
raise FatalError("Data dir not found")
self.data_dir = os.path.join(curdir, 'share', 'cerbero')
def _relative_path(self, path):
if not self.uninstalled:
p = os.path.join(self.data_dir, path)
else:
p = os.path.join(os.path.dirname(__file__), '..', path)
return os.path.abspath(p)
def _default_home_dir(self):
if self.uninstalled:
p = os.path.join(os.path.dirname(__file__), '..', 'build')
else:
p = os.path.expanduser('~/cerbero')
return os.path.abspath(p)
def _default_local_sources_dir(self):
# For backwards-compatibility, keep the old value for setups that
# define their own home_dir inside which all cerbero work must be
# contained; f.ex. ci.gstreamer.net
if self.home_dir != self._default_home_dir():
return os.path.join(self.home_dir, 'sources', 'local')
# Default value should be in a user-specific location so that it can
# be shared across all cerbero directories and invocations
if self.platform == Platform.WINDOWS and 'USERPROFILE' in os.environ:
cache_dir = Path(os.environ['USERPROFILE']) / '.cache'
elif 'XDG_CACHE_HOME' in os.environ:
cache_dir = Path(os.environ['XDG_CACHE_HOME'])
else:
# Path.home() reads the HOME env var
cache_dir = Path.home() / '.cache'
return (cache_dir / 'cerbero-sources').as_posix()
@lru_cache()
def _perl_version(self):
try:
version = shell.check_output("perl -e 'print \"$]\";'")
except FatalError:
m.warning(_("Perl not found, you may need to run bootstrap."))
version = '0.000000'
# FIXME: when perl's mayor is >= 10
mayor = str(version[0])
minor = str(int(version[2:5]))
revision = str(int(version[5:8]))
return '.'.join([mayor, minor, revision])
|
import os
from config_helper import ConfigHelper
class Quarantine(object):
def __init__(self):
self.config = ConfigHelper()
def should_quarantine(self, event):
"""Returns an enriched event object, or False if the event is OK"""
print self.config
event["quarantine_group"] = self.config.quarantine_group_name
if (self.config.quarantine_trigger_only_on_critical is True and
event["critical"] is False):
pass
elif (event["type"] in self.config.quarantine_trigger_events and
event["server_group_name"] in self.config.quarantine_trigger_group_names):
return event
return False
removed debug message
import os
from config_helper import ConfigHelper
class Quarantine(object):
def __init__(self):
self.config = ConfigHelper()
def should_quarantine(self, event):
"""Returns an enriched event object, or False if the event is OK"""
event["quarantine_group"] = self.config.quarantine_group_name
if (self.config.quarantine_trigger_only_on_critical is True and
event["critical"] is False):
pass
elif (event["type"] in self.config.quarantine_trigger_events and
event["server_group_name"] in self.config.quarantine_trigger_group_names):
return event
return False
|
# Copyright 2014 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`topology` --- SCION topology parser
===========================================
"""
# Stdlib
from ipaddress import ip_address
import json
import logging
class Element(object):
"""
The Element class is the base class for elements specified in the topology
file.
:ivar addr: IP or SCION address of a server or edge router.
:type addr: :class:`IPv4Address` or :class:`IPv6Address`
:ivar name: element name or id
:type name: str
"""
def __init__(self, addr=None, name=None):
"""
Initialize an instance of the class Element.
:param addr: IP or SCION address of the element
:type addr: str
:param name: element name or id
:type name: str
"""
if addr is None:
self.addr = None
else:
try:
self.addr = ip_address(addr)
except ValueError:
# TODO (@syclops): When new address types are added here (e.g.
# SCION addresses), add the appropriate code to set the address
# here.
raise
if name is None:
self.name = None
else:
self.name = str(name)
class ServerElement(Element):
"""
The ServerElement class represents one of the servers in the AD.
"""
def __init__(self, server_dict=None, name=None):
"""
Initialize an instance of the class ServerElement.
:param server_dict: contains information about a particular server.
:type server_dict: dict
:param name: server element name or id
:type name: str
"""
super().__init__(server_dict['Addr'], name)
class InterfaceElement(Element):
"""
The InterfaceElement class represents one of the interfaces of an edge
router.
:ivar if_id: the interface ID.
:type if_id: int
:ivar neighbor_ad: the AD identifier of the neighbor AD.
:type neighbor_ad: int
:ivar neighbor_isd: the ISD identifier of the neighbor AD.
:type neighbor_isd: int
:ivar neighbor_type: the type of the neighbor relative to the AD to which
the interface belongs.
:type neighbor_type: str
:ivar to_udp_port: the port number receiving UDP traffic on the other end of
the interface.
:type to_udp_port: int
:ivar udp_port: the port number used to send UDP traffic.
:type udp_port: int
"""
def __init__(self, interface_dict=None, name=None):
"""
Initialize an instance of the class InterfaceElement.
:param interface_dict: contains information about the interface.
:type interface_dict: dict
"""
super().__init__(interface_dict['Addr'], name)
self.if_id = interface_dict['IFID']
self.neighbor_ad = interface_dict['NeighborAD']
self.neighbor_isd = interface_dict['NeighborISD']
self.neighbor_type = interface_dict['NeighborType']
self.to_udp_port = interface_dict['ToUdpPort']
self.udp_port = interface_dict['UdpPort']
to_addr = interface_dict['ToAddr']
if to_addr is None:
self.to_addr = None
else:
try:
self.to_addr = ip_address(to_addr)
except ValueError:
# TODO (@syclops): When new address types are added here (e.g.
# SCION addresses), add the appropriate code to set the address
# here.
raise
class RouterElement(Element):
"""
The RouterElement class represents one of the edge routers.
:ivar interface: one of the interfaces of the edge router.
:type interface: :class:`InterfaceElement`
"""
def __init__(self, router_dict=None, name=None):
"""
Initialize an instance of the class RouterElement.
:param router_dict: contains information about an edge router.
:type router_dict: dict
:param name: router element name or id
:type name: str
"""
super().__init__(router_dict['Addr'], name)
self.interface = InterfaceElement(router_dict['Interface'])
class Topology(object):
"""
The Topology class parses the topology file of an AD and stores such
information for further use.
:ivar is_core_ad: tells whether an AD is a core AD or not.
:vartype is_core_ad: bool
:ivar isd_id: the ISD identifier.
:vartype isd_id: int
:ivar ad_id: the AD identifier.
:vartype ad_id: int
:ivar dns_domain: the dns domain the dns servers should use.
:vartype dns_domain: str
:ivar beacon_servers: beacons servers in the AD.
:vartype beacon_servers: list
:ivar certificate_servers: certificate servers in the AD.
:vartype certificate_servers: list
:ivar dns_servers: dns servers in the AD.
:vartype dns_servers: list
:ivar path_servers: path servers in the AD.
:vartype path_servers: list
:ivar parent_edge_routers: edge routers linking the AD to its parents.
:vartype parent_edge_routers: list
:ivar child_edge_routers: edge routers linking the AD to its children.
:vartype child_edge_routers: list
:ivar peer_edge_routers: edge router linking the AD to its peers.
:vartype peer_edge_routers: list
:ivar routing_edge_routers: edge router linking the core AD to another core
AD.
:vartype routing_edge_routers: list
"""
def __init__(self):
"""
Initialize an instance of the class Topology.
"""
self.is_core_ad = False
self.isd_id = 0
self.ad_id = 0
self.dns_domain = ""
self.beacon_servers = []
self.certificate_servers = []
self.dns_servers = []
self.path_servers = []
self.parent_edge_routers = []
self.child_edge_routers = []
self.peer_edge_routers = []
self.routing_edge_routers = []
@classmethod
def from_file(cls, topology_file):
"""
Create a Topology instance from the file.
:param topology_file: path to the topology file
:type topology_file: str
:returns: the newly created Topology instance
:rtype: :class: `Topology`
"""
try:
with open(topology_file) as topo_fh:
topology_dict = json.load(topo_fh)
except (ValueError, KeyError, TypeError):
logging.error("Topology: JSON format error.")
return
return cls.from_dict(topology_dict)
@classmethod
def from_dict(cls, topology_dict):
"""
Create a Topology instance from the dictionary.
:param topology_dict: dictionary representation of a topology
:type topology_dict: dict
:returns: the newly created Topology instance
:rtype: :class:`Topology`
"""
topology = cls()
topology.parse_dict(topology_dict)
return topology
def parse_dict(self, topology):
"""
Parse a topology dictionary and populate the instance's attributes.
:param topology: dictionary representation of a topology
:type topology: dict
"""
self.is_core_ad = (topology['Core'] == 1)
self.isd_id = topology['ISDID']
self.ad_id = topology['ADID']
self.dns_domain = topology['DnsDomain']
for bs_key in topology['BeaconServers']:
b_server = ServerElement(topology['BeaconServers'][bs_key],
bs_key)
self.beacon_servers.append(b_server)
for cs_key in topology['CertificateServers']:
c_server = ServerElement(topology['CertificateServers'][cs_key],
cs_key)
self.certificate_servers.append(c_server)
for ds_key in topology['DNSServers']:
d_server = ServerElement(topology['DNSServers'][ds_key],
ds_key)
self.dns_servers.append(d_server)
for ps_key in topology['PathServers']:
p_server = ServerElement(topology['PathServers'][ps_key],
ps_key)
self.path_servers.append(p_server)
for er_key in topology['EdgeRouters']:
edge_router = RouterElement(topology['EdgeRouters'][er_key],
er_key)
if edge_router.interface.neighbor_type == 'PARENT':
self.parent_edge_routers.append(edge_router)
elif edge_router.interface.neighbor_type == 'CHILD':
self.child_edge_routers.append(edge_router)
elif edge_router.interface.neighbor_type == 'PEER':
self.peer_edge_routers.append(edge_router)
elif edge_router.interface.neighbor_type == 'ROUTING':
self.routing_edge_routers.append(edge_router)
else:
logging.warning("Encountered unknown neighbor type")
def get_all_edge_routers(self):
"""
Return all edge routers associated to the AD.
:returns: all edge routers associated to the AD.
:rtype: list
"""
all_edge_routers = []
all_edge_routers.extend(self.parent_edge_routers)
all_edge_routers.extend(self.child_edge_routers)
all_edge_routers.extend(self.peer_edge_routers)
all_edge_routers.extend(self.routing_edge_routers)
return all_edge_routers
def get_own_config(self, server_type, server_id):
"""
:param server_type:
:type server_type:
:param server_id:
:type server_id:
"""
target = None
if server_type == "bs":
target = self.beacon_servers
elif server_type == "cs":
target = self.certificate_servers
elif server_type == "ds":
target = self.dns_servers
elif server_type == "ps":
target = self.path_servers
elif server_type == "er":
target = self.get_all_edge_routers()
else:
logging.error("Unknown server type: \"%s\"", server_type)
for i in target:
if i.name == server_id:
return i
else:
logging.error("Could not find server %s%s-%s-%s", server_type,
self.isd_id, self.ad_id, server_id)
Minor edit
get_own_config should return before the for loop, if server_type
is bad.
# Copyright 2014 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`topology` --- SCION topology parser
===========================================
"""
# Stdlib
from ipaddress import ip_address
import json
import logging
class Element(object):
"""
The Element class is the base class for elements specified in the topology
file.
:ivar addr: IP or SCION address of a server or edge router.
:type addr: :class:`IPv4Address` or :class:`IPv6Address`
:ivar name: element name or id
:type name: str
"""
def __init__(self, addr=None, name=None):
"""
Initialize an instance of the class Element.
:param addr: IP or SCION address of the element
:type addr: str
:param name: element name or id
:type name: str
"""
if addr is None:
self.addr = None
else:
try:
self.addr = ip_address(addr)
except ValueError:
# TODO (@syclops): When new address types are added here (e.g.
# SCION addresses), add the appropriate code to set the address
# here.
raise
if name is None:
self.name = None
else:
self.name = str(name)
class ServerElement(Element):
"""
The ServerElement class represents one of the servers in the AD.
"""
def __init__(self, server_dict=None, name=None):
"""
Initialize an instance of the class ServerElement.
:param server_dict: contains information about a particular server.
:type server_dict: dict
:param name: server element name or id
:type name: str
"""
super().__init__(server_dict['Addr'], name)
class InterfaceElement(Element):
"""
The InterfaceElement class represents one of the interfaces of an edge
router.
:ivar if_id: the interface ID.
:type if_id: int
:ivar neighbor_ad: the AD identifier of the neighbor AD.
:type neighbor_ad: int
:ivar neighbor_isd: the ISD identifier of the neighbor AD.
:type neighbor_isd: int
:ivar neighbor_type: the type of the neighbor relative to the AD to which
the interface belongs.
:type neighbor_type: str
:ivar to_udp_port: the port number receiving UDP traffic on the other end of
the interface.
:type to_udp_port: int
:ivar udp_port: the port number used to send UDP traffic.
:type udp_port: int
"""
def __init__(self, interface_dict=None, name=None):
"""
Initialize an instance of the class InterfaceElement.
:param interface_dict: contains information about the interface.
:type interface_dict: dict
"""
super().__init__(interface_dict['Addr'], name)
self.if_id = interface_dict['IFID']
self.neighbor_ad = interface_dict['NeighborAD']
self.neighbor_isd = interface_dict['NeighborISD']
self.neighbor_type = interface_dict['NeighborType']
self.to_udp_port = interface_dict['ToUdpPort']
self.udp_port = interface_dict['UdpPort']
to_addr = interface_dict['ToAddr']
if to_addr is None:
self.to_addr = None
else:
try:
self.to_addr = ip_address(to_addr)
except ValueError:
# TODO (@syclops): When new address types are added here (e.g.
# SCION addresses), add the appropriate code to set the address
# here.
raise
class RouterElement(Element):
"""
The RouterElement class represents one of the edge routers.
:ivar interface: one of the interfaces of the edge router.
:type interface: :class:`InterfaceElement`
"""
def __init__(self, router_dict=None, name=None):
"""
Initialize an instance of the class RouterElement.
:param router_dict: contains information about an edge router.
:type router_dict: dict
:param name: router element name or id
:type name: str
"""
super().__init__(router_dict['Addr'], name)
self.interface = InterfaceElement(router_dict['Interface'])
class Topology(object):
"""
The Topology class parses the topology file of an AD and stores such
information for further use.
:ivar is_core_ad: tells whether an AD is a core AD or not.
:vartype is_core_ad: bool
:ivar isd_id: the ISD identifier.
:vartype isd_id: int
:ivar ad_id: the AD identifier.
:vartype ad_id: int
:ivar dns_domain: the dns domain the dns servers should use.
:vartype dns_domain: str
:ivar beacon_servers: beacons servers in the AD.
:vartype beacon_servers: list
:ivar certificate_servers: certificate servers in the AD.
:vartype certificate_servers: list
:ivar dns_servers: dns servers in the AD.
:vartype dns_servers: list
:ivar path_servers: path servers in the AD.
:vartype path_servers: list
:ivar parent_edge_routers: edge routers linking the AD to its parents.
:vartype parent_edge_routers: list
:ivar child_edge_routers: edge routers linking the AD to its children.
:vartype child_edge_routers: list
:ivar peer_edge_routers: edge router linking the AD to its peers.
:vartype peer_edge_routers: list
:ivar routing_edge_routers: edge router linking the core AD to another core
AD.
:vartype routing_edge_routers: list
"""
def __init__(self):
"""
Initialize an instance of the class Topology.
"""
self.is_core_ad = False
self.isd_id = 0
self.ad_id = 0
self.dns_domain = ""
self.beacon_servers = []
self.certificate_servers = []
self.dns_servers = []
self.path_servers = []
self.parent_edge_routers = []
self.child_edge_routers = []
self.peer_edge_routers = []
self.routing_edge_routers = []
@classmethod
def from_file(cls, topology_file):
"""
Create a Topology instance from the file.
:param topology_file: path to the topology file
:type topology_file: str
:returns: the newly created Topology instance
:rtype: :class: `Topology`
"""
try:
with open(topology_file) as topo_fh:
topology_dict = json.load(topo_fh)
except (ValueError, KeyError, TypeError):
logging.error("Topology: JSON format error.")
return
return cls.from_dict(topology_dict)
@classmethod
def from_dict(cls, topology_dict):
"""
Create a Topology instance from the dictionary.
:param topology_dict: dictionary representation of a topology
:type topology_dict: dict
:returns: the newly created Topology instance
:rtype: :class:`Topology`
"""
topology = cls()
topology.parse_dict(topology_dict)
return topology
def parse_dict(self, topology):
"""
Parse a topology dictionary and populate the instance's attributes.
:param topology: dictionary representation of a topology
:type topology: dict
"""
self.is_core_ad = (topology['Core'] == 1)
self.isd_id = topology['ISDID']
self.ad_id = topology['ADID']
self.dns_domain = topology['DnsDomain']
for bs_key in topology['BeaconServers']:
b_server = ServerElement(topology['BeaconServers'][bs_key],
bs_key)
self.beacon_servers.append(b_server)
for cs_key in topology['CertificateServers']:
c_server = ServerElement(topology['CertificateServers'][cs_key],
cs_key)
self.certificate_servers.append(c_server)
for ds_key in topology['DNSServers']:
d_server = ServerElement(topology['DNSServers'][ds_key],
ds_key)
self.dns_servers.append(d_server)
for ps_key in topology['PathServers']:
p_server = ServerElement(topology['PathServers'][ps_key],
ps_key)
self.path_servers.append(p_server)
for er_key in topology['EdgeRouters']:
edge_router = RouterElement(topology['EdgeRouters'][er_key],
er_key)
if edge_router.interface.neighbor_type == 'PARENT':
self.parent_edge_routers.append(edge_router)
elif edge_router.interface.neighbor_type == 'CHILD':
self.child_edge_routers.append(edge_router)
elif edge_router.interface.neighbor_type == 'PEER':
self.peer_edge_routers.append(edge_router)
elif edge_router.interface.neighbor_type == 'ROUTING':
self.routing_edge_routers.append(edge_router)
else:
logging.warning("Encountered unknown neighbor type")
def get_all_edge_routers(self):
"""
Return all edge routers associated to the AD.
:returns: all edge routers associated to the AD.
:rtype: list
"""
all_edge_routers = []
all_edge_routers.extend(self.parent_edge_routers)
all_edge_routers.extend(self.child_edge_routers)
all_edge_routers.extend(self.peer_edge_routers)
all_edge_routers.extend(self.routing_edge_routers)
return all_edge_routers
def get_own_config(self, server_type, server_id):
"""
:param server_type:
:type server_type:
:param server_id:
:type server_id:
"""
target = None
if server_type == "bs":
target = self.beacon_servers
elif server_type == "cs":
target = self.certificate_servers
elif server_type == "ds":
target = self.dns_servers
elif server_type == "ps":
target = self.path_servers
elif server_type == "er":
target = self.get_all_edge_routers()
else:
logging.error("Unknown server type: \"%s\"", server_type)
return
for i in target:
if i.name == server_id:
return i
else:
logging.error("Could not find server %s%s-%s-%s", server_type,
self.isd_id, self.ad_id, server_id)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description {{{
"""
imposter.dbmanage
~~~~~~~~~~~~~~~~~
Application for maintaining the imposter database
It's mostly used for updating the database schema and data when upgrading
to newer imposter versions.
:copyright: (c) 2010 by Jochem Kossen.
:license: BSD, see LICENSE.txt for more details.
"""
# }}}
from flask import Flask
from migrate.versioning.api import version_control, upgrade, downgrade, db_version, version
from sqlalchemy.sql import and_
from models import User, Tag, Status, Format, Post, post_tags
from database import DB
from flaskjk import hashify, slugify
from datetime import datetime
import sys
import getpass
import datamigrations
app = Flask(__name__, static_path=None)
app.config.from_pyfile('config_admin.py')
app.config.from_envvar('IMPOSTER_DBMANAGE_CONFIG', silent=True)
db = app.config['DATABASE']
repo = 'migrations/'
def vc_db():
"""install SQLAlchemy-migrate versioning tables into database"""
version_control(url=db, repository=repo)
def upgrade_db(v=None):
"""upgrade database schema to latest version"""
from_version = db_version(url=db, repository=repo)
to_version = v
if to_version is None:
to_version = version(repository=repo)
print("Upgrading db from version %d to %d. " % (from_version, to_version))
print("Schema upgrade ... ")
upgrade(url=db, repository=repo, version=v)
print("Data upgrade ... ")
datamigrations.run_upgrade_scripts(app, from_version, to_version)
print("Done!")
def downgrade_db(v):
"""downgrade database schema to specified version"""
from_version = db_version(url=db, repository=repo)
to_version = int(v)
print("Downgrading db from version %d to %d. " % (from_version, to_version))
print("Schema upgrade ... ")
downgrade(url=db, repository=repo, version=v)
print("Data upgrade ... ")
datamigrations.run_downgrade_scripts(app, from_version, to_version)
print("Done!")
def add_initial_data():
"""Insert initial data into the database"""
# open database session
db_session = DB(db).get_session()
# ask user for an admin username and password
username = raw_input('Please enter the admin username: ')
password = getpass.getpass(prompt='Please enter the admin password: ')
# add user to database
u = User(username, hashify(app.config['SECRET_KEY'], password))
db_session.add(u)
# create statuses
s1 = Status('draft')
s2 = Status('private')
s3 = Status('public')
db_session.add(s1)
db_session.add(s2)
db_session.add(s3)
# create formats
f = Format('rest')
f2 = Format('markdown')
db_session.add(f)
db_session.add(f2)
# Tags
t1 = Tag('imposter')
t2 = Tag('weblog')
# build initial post and put it in the database
initial_post_summary = """
Installed Correctly!
"""
initial_post_content = """
Imposter was installed correctly!
This is just a sample post to show Imposter works.
**Have a lot of fun blogging!**
"""
p1 = Post('Welcome to Imposter!', initial_post_summary, initial_post_content)
p1.slug = slugify(p1.title)
p1.createdate = datetime.now()
p1.lastmoddate = datetime.now()
p1.pubdate = datetime.now()
p1.format = f
p1.status = s3
p1.user = u
p1.tags = [t1, t2]
p1.compile()
db_session.add(p1)
db_session.commit()
def install_db():
"""Initialize new Imposter database"""
vc_db()
upgrade_db()
add_initial_data()
def usage():
"""show dbmanage.py usage"""
print 'usage: dbmanage.py install|upgrade|downgrade version'
#---------------------------------------------------------------------------
# MAIN RUN LOOP
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
sys.exit(1)
if sys.argv[1] == 'install':
install_db()
elif sys.argv[1] == 'upgrade':
upgrade_db()
elif sys.argv[1] == 'downgrade' and len(sys.argv) == 3:
downgrade_db(sys.argv[2])
else:
usage()
sys.exit(1)
Switch to blowfish encryption
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description {{{
"""
imposter.dbmanage
~~~~~~~~~~~~~~~~~
Application for maintaining the imposter database
It's mostly used for updating the database schema and data when upgrading
to newer imposter versions.
:copyright: (c) 2010 by Jochem Kossen.
:license: BSD, see LICENSE.txt for more details.
"""
# }}}
from flask import Flask
from migrate.versioning.api import version_control, upgrade, downgrade, db_version, version
from sqlalchemy.sql import and_
from models import User, Tag, Status, Format, Post, post_tags
from database import DB
from flaskjk import encrypt_password, slugify
from datetime import datetime
import sys
import getpass
import datamigrations
app = Flask(__name__, static_path=None)
app.config.from_pyfile('config_admin.py')
app.config.from_envvar('IMPOSTER_DBMANAGE_CONFIG', silent=True)
db = app.config['DATABASE']
repo = 'migrations/'
def vc_db():
"""install SQLAlchemy-migrate versioning tables into database"""
version_control(url=db, repository=repo)
def upgrade_db(v=None):
"""upgrade database schema to latest version"""
from_version = db_version(url=db, repository=repo)
to_version = v
if to_version is None:
to_version = version(repository=repo)
print("Upgrading db from version %d to %d. " % (from_version, to_version))
print("Schema upgrade ... ")
upgrade(url=db, repository=repo, version=v)
print("Data upgrade ... ")
datamigrations.run_upgrade_scripts(app, from_version, to_version)
print("Done!")
def downgrade_db(v):
"""downgrade database schema to specified version"""
from_version = db_version(url=db, repository=repo)
to_version = int(v)
print("Downgrading db from version %d to %d. " % (from_version, to_version))
print("Schema upgrade ... ")
downgrade(url=db, repository=repo, version=v)
print("Data upgrade ... ")
datamigrations.run_downgrade_scripts(app, from_version, to_version)
print("Done!")
def add_initial_data():
"""Insert initial data into the database"""
# open database session
db_session = DB(db).get_session()
# ask user for an admin username and password
username = raw_input('Please enter the admin username: ')
password = getpass.getpass(prompt='Please enter the admin password: ')
# add user to database
u = User(username, encrypt_password(app.config['SECRET_KEY'], password))
db_session.add(u)
# create statuses
s1 = Status('draft')
s2 = Status('private')
s3 = Status('public')
db_session.add(s1)
db_session.add(s2)
db_session.add(s3)
# create formats
f = Format('rest')
f2 = Format('markdown')
db_session.add(f)
db_session.add(f2)
# Tags
t1 = Tag('imposter')
t2 = Tag('weblog')
# build initial post and put it in the database
initial_post_summary = """
Installed Correctly!
"""
initial_post_content = """
Imposter was installed correctly!
This is just a sample post to show Imposter works.
**Have a lot of fun blogging!**
"""
p1 = Post('Welcome to Imposter!', initial_post_summary, initial_post_content)
p1.slug = slugify(p1.title)
p1.createdate = datetime.now()
p1.lastmoddate = datetime.now()
p1.pubdate = datetime.now()
p1.format = f
p1.status = s3
p1.user = u
p1.tags = [t1, t2]
p1.compile()
db_session.add(p1)
db_session.commit()
def install_db():
"""Initialize new Imposter database"""
vc_db()
upgrade_db()
add_initial_data()
def usage():
"""show dbmanage.py usage"""
print 'usage: dbmanage.py install|upgrade|downgrade version'
#---------------------------------------------------------------------------
# MAIN RUN LOOP
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
sys.exit(1)
if sys.argv[1] == 'install':
install_db()
elif sys.argv[1] == 'upgrade':
upgrade_db()
elif sys.argv[1] == 'downgrade' and len(sys.argv) == 3:
downgrade_db(sys.argv[2])
else:
usage()
sys.exit(1)
|
#!/usr/bin/env python3.4
# -*- coding: utf8 -*-
import sys
import os.path
import locale
import contextlib
from pprint import pprint
import operator
import argparse
import configparser
import json
import csv
import gettext
from oxy.arg import parse as argparse
from oxy.mssql import Mssql
from oxy.firebird import Firebird
from oxy.usual import VerboseOutput
def count_field(valIni, fieldBreak):
''' Closure to "count" function variable '''
ini = int(valIni)
breakVal = None
def inner_func(dictRow):
nonlocal ini, breakVal
if fieldBreak:
if (not breakVal) or breakVal != dictRow[fieldBreak]:
breakVal = dictRow[fieldBreak]
ini = int(valIni)
result = ini
ini += 1
return result
return inner_func
def translate_field(queryDict):
''' Closure to "translate" function variable '''
query = queryDict
def inner_func(dictRow):
nonlocal query
result = None
with open(query['from']) as transFile:
readCsv = csv.reader(transFile, delimiter=';')
columns = None
for row in readCsv:
if columns:
csvRow = dict(zip(columns, row))
bOk = True
for cond in query['where']:
bOk = bOk and csvRow[cond[0]] == dictRow[cond[-1]]
if bOk:
result = csvRow[query['select']]
break
else:
columns = row
if (not result) and ('default' in query):
result = query['default']
if ('type' in query) and (query['type'] == 'n'):
result = float(result)
return result
return inner_func
def trim_field(fieldName):
''' Closure to "trim" function variable '''
field = fieldName
def inner_func(dictRow):
nonlocal field
if isinstance(dictRow[field], str):
result = dictRow[field].strip()
else:
result = ''
return result
return inner_func
def str_field(methodDict, variable):
''' Closure to str methods variable '''
method = methodDict
fieldName = variable
def inner_func(dictRow):
nonlocal method
result = ''
if 'field' in method:
field = method['field']
else:
field = fieldName
if isinstance(dictRow[field], str):
if method['method'] == 'rjust':
size = int(method['args'][0])
fill = method['args'][1]
result = dictRow[field].rjust(size, fill)
elif method['method'] == 'strip':
result = dictRow[field].strip()
elif method['method'] == 'char':
pos = int(method['args'][0])
result = dictRow[field][pos]
elif method['method'] == 'slice':
ini = int(method['args'][0])
if len(method['args']) > 1 and method['args'][1] != '':
end = int(method['args'][1])
else:
end = None
if len(method['args']) > 2 and method['args'][2] != '':
step = int(method['args'][2])
else:
step = None
result = dictRow[field][ini:end:step]
return result
return inner_func
class Main:
def checkFile(self, fileName, description, exitError):
self.vOut.prnt(description + ' file: {}'.format(fileName))
if not os.path.exists(fileName):
print('"{}" file "{}" does not exist'.format(
description, fileName))
sys.exit(exitError)
def fileWithDefaultDir(self, dire, fileName):
path, name = os.path.split(fileName)
if not path:
path = dire
return os.path.join(path, name)
def connectDataBase(self):
dbfrom = 'db.from.{}'.format(self.iniConfig.get('read', 'db'))
dbms = self.config.get(dbfrom, 'dbms')
if dbms == 'mssql':
self.db = Mssql(self.config.get(dbfrom, 'username'),
self.config.get(dbfrom, 'password'),
self.config.get(dbfrom, 'hostname'),
self.config.get(dbfrom, 'port'),
self.config.get(dbfrom, 'database'),
self.config.get(dbfrom, 'schema'))
elif dbms == 'firebird':
self.db = Firebird(self.config.get(dbfrom, 'username'),
self.config.get(dbfrom, 'password'),
self.config.get(dbfrom, 'hostname'),
self.config.get(dbfrom, 'database'),
self.config.get(dbfrom, 'charset'))
else:
raise NameError(
'For now, script is not prepared for "'+dbms+'".')
self.db.connect()
def closeDataBase(self):
self.db.disconnect()
def __init__(self):
self.main()
def parseArgs(self):
parser = argparse.ArgumentParser(
description=_('Write CSV from Mssql database'),
epilog="(c) Tussor & Oxigenai",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"iniFile",
help='data group INI file name, in the format '
'[dir/]data_group_name[.version].ini')
parser.add_argument(
"csvFile",
help='CSV file to be created '
'[dir/]_file_name.csv')
parser.add_argument(
"--cfg", "--cfgfile",
type=str,
default='tuple-load.cfg',
help='config file of data access and groups')
parser.add_argument(
"--ini", "--inidir",
type=str,
default='ini',
help='default directory for ini files')
parser.add_argument(
"--csv", "--csvdir",
type=str,
default='csv',
help='default directory for csv files')
parser.add_argument(
"--json", "--jsondir",
type=str,
default='json',
help='default directory for json files')
parser.add_argument(
"-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
self.args = parser.parse_args()
self.args.iniFile = \
self.fileWithDefaultDir(self.args.ini, self.args.iniFile)
self.args.csvFile = \
self.fileWithDefaultDir(self.args.csv, self.args.csvFile)
def configProcess(self):
self.vOut.prnt('->configProcess', 2)
self.checkFile(self.args.iniFile, 'INI', 11)
self.checkFile(self.args.cfg, 'Config', 12)
self.iniConfig = configparser.RawConfigParser()
self.iniConfig.read(self.args.iniFile)
self.config = configparser.RawConfigParser()
self.config.read(self.args.cfg)
with contextlib.suppress(FileNotFoundError):
os.remove(self.args.csvFile)
if os.path.exists('secret.py'):
from secret import DBSECRET
for dbkey in DBSECRET:
self.config[dbkey].update(DBSECRET[dbkey])
def run(self):
self.vOut.prnt('->run', 2)
locale.setlocale(locale.LC_ALL, 'pt_BR.utf8')
try:
self.connectDataBase()
with open(self.args.csvFile, 'w') as self.csvNew:
self.executeQueries()
finally:
self.closeDataBase()
self.doPostProcess()
def getElementDef(self, aList, aKey, default):
return default if aKey not in aList else aList[aKey]
def loadFunctionVariables(self):
self.vOut.prnt('->loadFunctionVariables', 3)
dictRowFunctions = []
if 'functions' in self.iniConfig.sections():
self.vOut.pprnt(self.iniConfig.items("functions"), 4)
for variable, value in self.iniConfig.items('functions'):
self.vOut.prnt('variable: {}'.format(variable), 4)
varParams = json.loads(value)
if 'count' in varParams:
funcParams = varParams['count']
dictRowFunctions.append([variable, count_field(
self.getElementDef(funcParams, 'start', '1'),
self.getElementDef(funcParams, 'break', None))
])
elif 'translate' in varParams:
funcParams = varParams['translate']
funcParams['from'] = self.fileWithDefaultDir(
self.args.csv, funcParams['from'])
dictRowFunctions.append(
[variable, translate_field(funcParams)])
elif 'trim' in varParams:
funcParams = varParams['trim']
dictRowFunctions.append(
[variable, trim_field(funcParams['field'])])
elif 'str' in varParams:
funcParams = varParams['str']
dictRowFunctions.append(
[variable, str_field(funcParams, variable)])
return dictRowFunctions
def addVariablesToRow(self, dictRow):
if 'variables' in self.iniConfig.sections():
for variable, value in self.iniConfig.items('variables'):
varParams = json.loads(value)
if 'value' in varParams.keys():
dictRow[variable] = varParams['value']
def execFunctionsToRow(self, dictRowFunctions, dictRow):
self.vOut.prnt('->execFunctionsToRow', 4)
for function in dictRowFunctions:
self.vOut.prnt('column: {}'.format(function[0]), 4)
dictRow[function[0]] = function[1](dictRow)
def executeQueries(self):
self.vOut.prnt('->executeQueries', 2)
self.doHeader = True
for i in range(10):
if i == 0:
sqlVar = 'sql'
else:
sqlVar = 'sql{}'.format(i)
if sqlVar in list(self.iniConfig['read']):
self.vOut.prnt('sql = {}'.format(sqlVar), 3)
sqlF = self.iniConfig.get('read', sqlVar)
self.executeQuery(sqlF)
def executeQuery(self, sqlF):
curF = self.db.cursorExecute(sqlF)
columns = [column[0].lower() for column in curF.description]
dictRowFunctions = self.loadFunctionVariables()
headerLine = ''
while True:
row = curF.fetchone()
if not row:
break
dictRow = dict(zip(columns, row))
self.addVariablesToRow(dictRow)
self.execFunctionsToRow(dictRowFunctions, dictRow)
dataLine = ''
separator = ''
for column, spec in self.iniConfig.items("columns"):
# print(column, spec)
colType = spec[0]
colParams = {}
if len(spec) > 2:
colParams = json.loads(spec[2:])
colValue = None
# A column can be made by processing others, so it can not
# exist in dictRow
if column in dictRow.keys():
colValue = dictRow[column]
if colType == 't':
if not colValue:
colValue = ''
if 'format' in colParams:
if 'fields' in colParams:
colFieldValues = \
tuple((dictRow[c]
for c in colParams['fields']))
else:
colFieldValues = tuple(colValue,)
colValue = \
colParams['format'] % tuple(colFieldValues)
colValue = \
'"{}"'.format(colValue.replace('"', '""'))
elif colType == 'n':
if not colValue:
colValue = 0
colValue = \
locale.format(colParams['format'], colValue)
elif colType == 'd':
if not colValue:
colValue = ''
else:
# print(type(dictRow[column]))
colValue = colValue.strftime(colParams['format'])
dataLine += '{}{}'.format(separator, colValue)
if self.doHeader:
headerLine += '{}"{}"'.format(separator, column.upper())
separator = ';'
if self.doHeader:
# print(headerLine)
self.csvNew.write('{}\n'.format(headerLine))
self.doHeader = False
# print(dataLine)
self.csvNew.write('{}\n'.format(dataLine))
# sys.exit(2)
def sortCsv(self, args):
self.vOut.prnt('->sortCsv', 3)
reader = csv.reader(open(self.args.csvFile), delimiter=';')
cab = next(reader)
sortedlist = sorted(reader, key=operator.itemgetter(0))
filename, file_extension = os.path.splitext(self.args.csvFile)
writer = csv.writer(
open('{}.sorted{}'.format(filename, file_extension),
'w', newline=''),
delimiter=';',
quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(cab)
for row in sortedlist:
writer.writerow(row)
def doPostProcess(self):
self.vOut.prnt('->doPostProcess', 3)
if 'post_process' in self.iniConfig.sections():
self.vOut.pprnt(self.iniConfig.items('post_process'), 4)
for variable, value in self.iniConfig.items('post_process'):
self.vOut.pprnt(variable, 4)
if variable == 'sort':
self.sortCsv(value)
def main(self):
self.parseArgs()
self.vOut = VerboseOutput(self.args.verbosity)
self.configProcess()
self.run()
if __name__ == '__main__':
tupleLoadGT = gettext.translation('tuple-load', 'po', fallback=True)
tupleLoadGT.install()
Main()
Selecting sort field
#!/usr/bin/env python3.4
# -*- coding: utf8 -*-
import sys
import os.path
import locale
import contextlib
from pprint import pprint
import operator
import argparse
import configparser
import json
import csv
import gettext
from oxy.arg import parse as argparse
from oxy.mssql import Mssql
from oxy.firebird import Firebird
from oxy.usual import VerboseOutput
def count_field(valIni, fieldBreak):
''' Closure to "count" function variable '''
ini = int(valIni)
breakVal = None
def inner_func(dictRow):
nonlocal ini, breakVal
if fieldBreak:
if (not breakVal) or breakVal != dictRow[fieldBreak]:
breakVal = dictRow[fieldBreak]
ini = int(valIni)
result = ini
ini += 1
return result
return inner_func
def translate_field(queryDict):
''' Closure to "translate" function variable '''
query = queryDict
def inner_func(dictRow):
nonlocal query
result = None
with open(query['from']) as transFile:
readCsv = csv.reader(transFile, delimiter=';')
columns = None
for row in readCsv:
if columns:
csvRow = dict(zip(columns, row))
bOk = True
for cond in query['where']:
bOk = bOk and csvRow[cond[0]] == dictRow[cond[-1]]
if bOk:
result = csvRow[query['select']]
break
else:
columns = row
if (not result) and ('default' in query):
result = query['default']
if ('type' in query) and (query['type'] == 'n'):
result = float(result)
return result
return inner_func
def trim_field(fieldName):
''' Closure to "trim" function variable '''
field = fieldName
def inner_func(dictRow):
nonlocal field
if isinstance(dictRow[field], str):
result = dictRow[field].strip()
else:
result = ''
return result
return inner_func
def str_field(methodDict, variable):
''' Closure to str methods variable '''
method = methodDict
fieldName = variable
def inner_func(dictRow):
nonlocal method
result = ''
if 'field' in method:
field = method['field']
else:
field = fieldName
if isinstance(dictRow[field], str):
if method['method'] == 'rjust':
size = int(method['args'][0])
fill = method['args'][1]
result = dictRow[field].rjust(size, fill)
elif method['method'] == 'strip':
result = dictRow[field].strip()
elif method['method'] == 'char':
pos = int(method['args'][0])
result = dictRow[field][pos]
elif method['method'] == 'slice':
ini = int(method['args'][0])
if len(method['args']) > 1 and method['args'][1] != '':
end = int(method['args'][1])
else:
end = None
if len(method['args']) > 2 and method['args'][2] != '':
step = int(method['args'][2])
else:
step = None
result = dictRow[field][ini:end:step]
return result
return inner_func
class Main:
def checkFile(self, fileName, description, exitError):
self.vOut.prnt(description + ' file: {}'.format(fileName))
if not os.path.exists(fileName):
print('"{}" file "{}" does not exist'.format(
description, fileName))
sys.exit(exitError)
def fileWithDefaultDir(self, dire, fileName):
path, name = os.path.split(fileName)
if not path:
path = dire
return os.path.join(path, name)
def connectDataBase(self):
dbfrom = 'db.from.{}'.format(self.iniConfig.get('read', 'db'))
dbms = self.config.get(dbfrom, 'dbms')
if dbms == 'mssql':
self.db = Mssql(self.config.get(dbfrom, 'username'),
self.config.get(dbfrom, 'password'),
self.config.get(dbfrom, 'hostname'),
self.config.get(dbfrom, 'port'),
self.config.get(dbfrom, 'database'),
self.config.get(dbfrom, 'schema'))
elif dbms == 'firebird':
self.db = Firebird(self.config.get(dbfrom, 'username'),
self.config.get(dbfrom, 'password'),
self.config.get(dbfrom, 'hostname'),
self.config.get(dbfrom, 'database'),
self.config.get(dbfrom, 'charset'))
else:
raise NameError(
'For now, script is not prepared for "'+dbms+'".')
self.db.connect()
def closeDataBase(self):
self.db.disconnect()
def __init__(self):
self.main()
def parseArgs(self):
parser = argparse.ArgumentParser(
description=_('Write CSV from Mssql database'),
epilog="(c) Tussor & Oxigenai",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"iniFile",
help='data group INI file name, in the format '
'[dir/]data_group_name[.version].ini')
parser.add_argument(
"csvFile",
help='CSV file to be created '
'[dir/]_file_name.csv')
parser.add_argument(
"--cfg", "--cfgfile",
type=str,
default='tuple-load.cfg',
help='config file of data access and groups')
parser.add_argument(
"--ini", "--inidir",
type=str,
default='ini',
help='default directory for ini files')
parser.add_argument(
"--csv", "--csvdir",
type=str,
default='csv',
help='default directory for csv files')
parser.add_argument(
"--json", "--jsondir",
type=str,
default='json',
help='default directory for json files')
parser.add_argument(
"-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
self.args = parser.parse_args()
self.args.iniFile = \
self.fileWithDefaultDir(self.args.ini, self.args.iniFile)
self.args.csvFile = \
self.fileWithDefaultDir(self.args.csv, self.args.csvFile)
def configProcess(self):
self.vOut.prnt('->configProcess', 2)
self.checkFile(self.args.iniFile, 'INI', 11)
self.checkFile(self.args.cfg, 'Config', 12)
self.iniConfig = configparser.RawConfigParser()
self.iniConfig.read(self.args.iniFile)
self.config = configparser.RawConfigParser()
self.config.read(self.args.cfg)
with contextlib.suppress(FileNotFoundError):
os.remove(self.args.csvFile)
if os.path.exists('secret.py'):
from secret import DBSECRET
for dbkey in DBSECRET:
self.config[dbkey].update(DBSECRET[dbkey])
def run(self):
self.vOut.prnt('->run', 2)
locale.setlocale(locale.LC_ALL, 'pt_BR.utf8')
try:
self.connectDataBase()
with open(self.args.csvFile, 'w') as self.csvNew:
self.executeQueries()
finally:
self.closeDataBase()
self.doPostProcess()
def getElementDef(self, aList, aKey, default):
return default if aKey not in aList else aList[aKey]
def loadFunctionVariables(self):
self.vOut.prnt('->loadFunctionVariables', 3)
dictRowFunctions = []
if 'functions' in self.iniConfig.sections():
self.vOut.pprnt(self.iniConfig.items("functions"), 4)
for variable, value in self.iniConfig.items('functions'):
self.vOut.prnt('variable: {}'.format(variable), 4)
varParams = json.loads(value)
if 'count' in varParams:
funcParams = varParams['count']
dictRowFunctions.append([variable, count_field(
self.getElementDef(funcParams, 'start', '1'),
self.getElementDef(funcParams, 'break', None))
])
elif 'translate' in varParams:
funcParams = varParams['translate']
funcParams['from'] = self.fileWithDefaultDir(
self.args.csv, funcParams['from'])
dictRowFunctions.append(
[variable, translate_field(funcParams)])
elif 'trim' in varParams:
funcParams = varParams['trim']
dictRowFunctions.append(
[variable, trim_field(funcParams['field'])])
elif 'str' in varParams:
funcParams = varParams['str']
dictRowFunctions.append(
[variable, str_field(funcParams, variable)])
return dictRowFunctions
def addVariablesToRow(self, dictRow):
if 'variables' in self.iniConfig.sections():
for variable, value in self.iniConfig.items('variables'):
varParams = json.loads(value)
if 'value' in varParams.keys():
dictRow[variable] = varParams['value']
def execFunctionsToRow(self, dictRowFunctions, dictRow):
self.vOut.prnt('->execFunctionsToRow', 4)
for function in dictRowFunctions:
self.vOut.prnt('column: {}'.format(function[0]), 4)
dictRow[function[0]] = function[1](dictRow)
def executeQueries(self):
self.vOut.prnt('->executeQueries', 2)
self.doHeader = True
for i in range(10):
if i == 0:
sqlVar = 'sql'
else:
sqlVar = 'sql{}'.format(i)
if sqlVar in list(self.iniConfig['read']):
self.vOut.prnt('sql = {}'.format(sqlVar), 3)
sqlF = self.iniConfig.get('read', sqlVar)
self.executeQuery(sqlF)
def executeQuery(self, sqlF):
curF = self.db.cursorExecute(sqlF)
columns = [column[0].lower() for column in curF.description]
dictRowFunctions = self.loadFunctionVariables()
headerLine = ''
while True:
row = curF.fetchone()
if not row:
break
dictRow = dict(zip(columns, row))
self.addVariablesToRow(dictRow)
self.execFunctionsToRow(dictRowFunctions, dictRow)
dataLine = ''
separator = ''
for column, spec in self.iniConfig.items("columns"):
# print(column, spec)
colType = spec[0]
colParams = {}
if len(spec) > 2:
colParams = json.loads(spec[2:])
colValue = None
# A column can be made by processing others, so it can not
# exist in dictRow
if column in dictRow.keys():
colValue = dictRow[column]
if colType == 't':
if not colValue:
colValue = ''
if 'format' in colParams:
if 'fields' in colParams:
colFieldValues = \
tuple((dictRow[c]
for c in colParams['fields']))
else:
colFieldValues = tuple(colValue,)
colValue = \
colParams['format'] % tuple(colFieldValues)
colValue = \
'"{}"'.format(colValue.replace('"', '""'))
elif colType == 'n':
if not colValue:
colValue = 0
colValue = \
locale.format(colParams['format'], colValue)
elif colType == 'd':
if not colValue:
colValue = ''
else:
# print(type(dictRow[column]))
colValue = colValue.strftime(colParams['format'])
dataLine += '{}{}'.format(separator, colValue)
if self.doHeader:
headerLine += '{}"{}"'.format(separator, column.upper())
separator = ';'
if self.doHeader:
# print(headerLine)
self.csvNew.write('{}\n'.format(headerLine))
self.doHeader = False
# print(dataLine)
self.csvNew.write('{}\n'.format(dataLine))
# sys.exit(2)
def sortCsv(self, rule):
self.vOut.prnt('->sortCsv', 3)
varParams = json.loads(rule)
self.vOut.pprnt(varParams, 3)
reader = csv.reader(open(self.args.csvFile), delimiter=';')
cab = next(reader)
keyFieldIndex = cab.index(varParams['field'].upper())
sortedlist = sorted(reader, key=operator.itemgetter(keyFieldIndex))
filename, file_extension = os.path.splitext(self.args.csvFile)
writer = csv.writer(
open('{}.sorted{}'.format(filename, file_extension),
'w', newline=''),
delimiter=';',
quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(cab)
for row in sortedlist:
writer.writerow(row)
def doPostProcess(self):
self.vOut.prnt('->doPostProcess', 3)
if 'post_process' in self.iniConfig.sections():
self.vOut.pprnt(self.iniConfig.items('post_process'), 4)
for variable, value in self.iniConfig.items('post_process'):
self.vOut.pprnt(variable, 4)
if variable == 'sort':
self.sortCsv(value)
def main(self):
self.parseArgs()
self.vOut = VerboseOutput(self.args.verbosity)
self.configProcess()
self.run()
if __name__ == '__main__':
tupleLoadGT = gettext.translation('tuple-load', 'po', fallback=True)
tupleLoadGT.install()
Main()
|
from dbt.logger import initialize_logger, GLOBAL_LOGGER as logger, \
logger_initialized, log_cache_events
import argparse
import os.path
import sys
import traceback
import dbt.version
import dbt.flags as flags
import dbt.task.run as run_task
import dbt.task.compile as compile_task
import dbt.task.debug as debug_task
import dbt.task.clean as clean_task
import dbt.task.deps as deps_task
import dbt.task.init as init_task
import dbt.task.seed as seed_task
import dbt.task.test as test_task
import dbt.task.archive as archive_task
import dbt.task.generate as generate_task
import dbt.task.serve as serve_task
from dbt.adapters.factory import reset_adapters
import dbt.tracking
import dbt.ui.printer
import dbt.compat
import dbt.deprecations
import dbt.profiler
from dbt.utils import ExitCodes
from dbt.config import Project, UserConfig, RuntimeConfig, PROFILES_DIR, \
read_profiles
from dbt.exceptions import DbtProjectError, DbtProfileError, RuntimeException
PROFILES_HELP_MESSAGE = """
For more information on configuring profiles, please consult the dbt docs:
https://docs.getdbt.com/docs/configure-your-profile
"""
class DBTVersion(argparse.Action):
"""This is very very similar to the builtin argparse._Version action,
except it just calls dbt.version.get_version_information().
"""
def __init__(self,
option_strings,
version=None,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
help="show program's version number and exit"):
super(DBTVersion, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
formatter = parser._get_formatter()
formatter.add_text(dbt.version.get_version_information())
parser.exit(message=formatter.format_help())
class DBTArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
super(DBTArgumentParser, self).__init__(*args, **kwargs)
self.register('action', 'dbtversion', DBTVersion)
def main(args=None):
if args is None:
args = sys.argv[1:]
try:
results, succeeded = handle_and_check(args)
if succeeded:
exit_code = ExitCodes.Success
else:
exit_code = ExitCodes.ModelError
except KeyboardInterrupt as e:
logger.info("ctrl-c")
exit_code = ExitCodes.UnhandledError
# This can be thrown by eg. argparse
except SystemExit as e:
exit_code = e.code
except BaseException as e:
logger.info("Encountered an error:")
logger.info(str(e))
if logger_initialized():
logger.debug(traceback.format_exc())
elif not isinstance(e, RuntimeException):
# if it did not come from dbt proper and the logger is not
# initialized (so there's no safe path to log to), log the stack
# trace at error level.
logger.error(traceback.format_exc())
exit_code = ExitCodes.UnhandledError
sys.exit(exit_code)
# here for backwards compatibility
def handle(args):
res, success = handle_and_check(args)
return res
def initialize_config_values(parsed):
"""Given the parsed args, initialize the dbt tracking code.
It would be nice to re-use this profile later on instead of parsing it
twice, but dbt's intialization is not structured in a way that makes that
easy.
"""
try:
cfg = UserConfig.from_directory(parsed.profiles_dir)
except RuntimeException:
cfg = UserConfig.from_dict(None)
if cfg.send_anonymous_usage_stats:
dbt.tracking.initialize_tracking(parsed.profiles_dir)
else:
dbt.tracking.do_not_track()
if cfg.use_colors:
dbt.ui.printer.use_colors()
def handle_and_check(args):
parsed = parse_args(args)
profiler_enabled = False
if parsed.record_timing_info:
profiler_enabled = True
with dbt.profiler.profiler(
enable=profiler_enabled,
outfile=parsed.record_timing_info
):
initialize_config_values(parsed)
reset_adapters()
try:
task, res = run_from_args(parsed)
finally:
dbt.tracking.flush()
success = task.interpret_results(res)
return res, success
def get_nearest_project_dir():
root_path = os.path.abspath(os.sep)
cwd = os.getcwd()
while cwd != root_path:
project_file = os.path.join(cwd, "dbt_project.yml")
if os.path.exists(project_file):
return cwd
cwd = os.path.dirname(cwd)
return None
def run_from_args(parsed):
task = None
cfg = None
if parsed.which in ('init', 'debug'):
# bypass looking for a project file if we're running `dbt init` or
# `dbt debug`
task = parsed.cls(args=parsed)
else:
nearest_project_dir = get_nearest_project_dir()
if nearest_project_dir is None:
raise RuntimeException(
"fatal: Not a dbt project (or any of the parent directories). "
"Missing dbt_project.yml file"
)
os.chdir(nearest_project_dir)
res = invoke_dbt(parsed)
if res is None:
raise RuntimeException("Could not run dbt")
else:
task, cfg = res
log_path = None
if cfg is not None:
log_path = cfg.log_path
initialize_logger(parsed.debug, log_path)
logger.debug("Tracking: {}".format(dbt.tracking.active_user.state()))
dbt.tracking.track_invocation_start(config=cfg, args=parsed)
results = run_from_task(task, cfg, parsed)
return task, results
def run_from_task(task, cfg, parsed_args):
result = None
try:
result = task.run()
dbt.tracking.track_invocation_end(
config=cfg, args=parsed_args, result_type="ok"
)
except (dbt.exceptions.NotImplementedException,
dbt.exceptions.FailedToConnectException) as e:
logger.info('ERROR: {}'.format(e))
dbt.tracking.track_invocation_end(
config=cfg, args=parsed_args, result_type="error"
)
except Exception as e:
dbt.tracking.track_invocation_end(
config=cfg, args=parsed_args, result_type="error"
)
raise
return result
def invoke_dbt(parsed):
task = None
cfg = None
log_cache_events(getattr(parsed, 'log_cache_events', False))
try:
if parsed.which in {'deps', 'clean'}:
# deps doesn't need a profile, so don't require one.
cfg = Project.from_current_directory(getattr(parsed, 'vars', '{}'))
elif parsed.which != 'debug':
# for debug, we will attempt to load the various configurations as
# part of the task, so just leave cfg=None.
cfg = RuntimeConfig.from_args(parsed)
except DbtProjectError as e:
logger.info("Encountered an error while reading the project:")
logger.info(dbt.compat.to_string(e))
dbt.tracking.track_invalid_invocation(
config=cfg,
args=parsed,
result_type=e.result_type)
return None
except DbtProfileError as e:
logger.info("Encountered an error while reading profiles:")
logger.info(" ERROR {}".format(str(e)))
all_profiles = read_profiles(parsed.profiles_dir).keys()
if len(all_profiles) > 0:
logger.info("Defined profiles:")
for profile in all_profiles:
logger.info(" - {}".format(profile))
else:
logger.info("There are no profiles defined in your "
"profiles.yml file")
logger.info(PROFILES_HELP_MESSAGE)
dbt.tracking.track_invalid_invocation(
config=cfg,
args=parsed,
result_type=e.result_type)
return None
flags.NON_DESTRUCTIVE = getattr(parsed, 'non_destructive', False)
flags.USE_CACHE = getattr(parsed, 'use_cache', True)
arg_drop_existing = getattr(parsed, 'drop_existing', False)
arg_full_refresh = getattr(parsed, 'full_refresh', False)
if arg_drop_existing:
dbt.deprecations.warn('drop-existing')
flags.FULL_REFRESH = True
elif arg_full_refresh:
flags.FULL_REFRESH = True
logger.debug("running dbt with arguments %s", parsed)
task = parsed.cls(args=parsed, config=cfg)
return task, cfg
def parse_args(args):
p = DBTArgumentParser(
prog='dbt: data build tool',
formatter_class=argparse.RawTextHelpFormatter,
description="An ELT tool for managing your SQL "
"transformations and data models."
"\nFor more documentation on these commands, visit: "
"docs.getdbt.com",
epilog="Specify one of these sub-commands and you can "
"find more help from there.")
p.add_argument(
'--version',
action='dbtversion',
help="Show version information")
p.add_argument(
'-r',
'--record-timing-info',
default=None,
type=str,
help="""
When this option is passed, dbt will output low-level timing
stats to the specified file. Example:
`--record-timing-info output.profile`
"""
)
p.add_argument(
'-d',
'--debug',
action='store_true',
help='''Display debug logging during dbt execution. Useful for
debugging and making bug reports.''')
p.add_argument(
'-S',
'--strict',
action='store_true',
help='''Run schema validations at runtime. This will surface
bugs in dbt, but may incur a performance penalty.''')
# if set, run dbt in single-threaded mode: thread count is ignored, and
# calls go through `map` instead of the thread pool. This is useful for
# getting performance information about aspects of dbt that normally run in
# a thread, as the profiler ignores child threads. Users should really
# never use this.
p.add_argument(
'--single-threaded',
action='store_true',
help=argparse.SUPPRESS,
)
subs = p.add_subparsers(title="Available sub-commands")
base_subparser = argparse.ArgumentParser(add_help=False)
base_subparser.add_argument(
'--profiles-dir',
default=PROFILES_DIR,
type=str,
help="""
Which directory to look in for the profiles.yml file. Default = {}
""".format(PROFILES_DIR)
)
base_subparser.add_argument(
'--profile',
required=False,
type=str,
help="""
Which profile to load. Overrides setting in dbt_project.yml.
"""
)
base_subparser.add_argument(
'--target',
default=None,
type=str,
help='Which target to load for the given profile'
)
base_subparser.add_argument(
'--vars',
type=str,
default='{}',
help="""
Supply variables to the project. This argument overrides
variables defined in your dbt_project.yml file. This argument
should be a YAML string, eg. '{my_variable: my_value}'"""
)
# if set, log all cache events. This is extremely verbose!
base_subparser.add_argument(
'--log-cache-events',
action='store_true',
help=argparse.SUPPRESS,
)
base_subparser.add_argument(
'--bypass-cache',
action='store_false',
dest='use_cache',
help='If set, bypass the adapter-level cache of database state',
)
sub = subs.add_parser(
'init',
parents=[base_subparser],
help="Initialize a new DBT project.")
sub.add_argument('project_name', type=str, help='Name of the new project')
sub.set_defaults(cls=init_task.InitTask, which='init')
sub = subs.add_parser(
'clean',
parents=[base_subparser],
help="Delete all folders in the clean-targets list"
"\n(usually the dbt_modules and target directories.)")
sub.set_defaults(cls=clean_task.CleanTask, which='clean')
sub = subs.add_parser(
'debug',
parents=[base_subparser],
help="Show some helpful information about dbt for debugging."
"\nNot to be confused with the --debug option which increases "
"verbosity.")
sub.add_argument(
'--config-dir',
action='store_true',
help="""
If specified, DBT will show path information for this project
"""
)
sub.set_defaults(cls=debug_task.DebugTask, which='debug')
sub = subs.add_parser(
'deps',
parents=[base_subparser],
help="Pull the most recent version of the dependencies "
"listed in packages.yml")
sub.set_defaults(cls=deps_task.DepsTask, which='deps')
sub = subs.add_parser(
'archive',
parents=[base_subparser],
help="Record changes to a mutable table over time."
"\nMust be configured in your dbt_project.yml.")
sub.add_argument(
'--threads',
type=int,
required=False,
help="""
Specify number of threads to use while archiving tables. Overrides
settings in profiles.yml.
"""
)
sub.set_defaults(cls=archive_task.ArchiveTask, which='archive')
run_sub = subs.add_parser(
'run',
parents=[base_subparser],
help="Compile SQL and execute against the current "
"target database.")
run_sub.set_defaults(cls=run_task.RunTask, which='run')
compile_sub = subs.add_parser(
'compile',
parents=[base_subparser],
help="Generates executable SQL from source model, test, and"
"analysis files. \nCompiled SQL files are written to the target/"
"directory.")
compile_sub.set_defaults(cls=compile_task.CompileTask, which='compile')
docs_sub = subs.add_parser(
'docs',
parents=[base_subparser],
help="Generate or serve the documentation "
"website for your project.")
docs_subs = docs_sub.add_subparsers()
# it might look like docs_sub is the correct parents entry, but that
# will cause weird errors about 'conflicting option strings'.
generate_sub = docs_subs.add_parser('generate', parents=[base_subparser])
generate_sub.set_defaults(cls=generate_task.GenerateTask,
which='generate')
generate_sub.add_argument(
'--no-compile',
action='store_false',
dest='compile',
help='Do not run "dbt compile" as part of docs generation'
)
for sub in [run_sub, compile_sub, generate_sub]:
sub.add_argument(
'-m',
'--models',
required=False,
nargs='+',
help="""
Specify the models to include.
"""
)
sub.add_argument(
'--exclude',
required=False,
nargs='+',
help="""
Specify the models to exclude.
"""
)
sub.add_argument(
'--threads',
type=int,
required=False,
help="""
Specify number of threads to use while executing models. Overrides
settings in profiles.yml.
"""
)
sub.add_argument(
'--non-destructive',
action='store_true',
help="""
If specified, DBT will not drop views. Tables will be truncated
instead of dropped.
"""
)
sub.add_argument(
'--full-refresh',
action='store_true',
help="""
If specified, DBT will drop incremental models and
fully-recalculate the incremental table from the model definition.
""")
seed_sub = subs.add_parser(
'seed',
parents=[base_subparser],
help="Load data from csv files into your data warehouse.")
seed_sub.add_argument(
'--drop-existing',
action='store_true',
help='(DEPRECATED) Use --full-refresh instead.'
)
seed_sub.add_argument(
'--full-refresh',
action='store_true',
help='Drop existing seed tables and recreate them'
)
seed_sub.add_argument(
'--show',
action='store_true',
help='Show a sample of the loaded data in the terminal'
)
seed_sub.set_defaults(cls=seed_task.SeedTask, which='seed')
serve_sub = docs_subs.add_parser('serve', parents=[base_subparser])
serve_sub.add_argument(
'--port',
default=8080,
type=int,
help='Specify the port number for the docs server.'
)
serve_sub.set_defaults(cls=serve_task.ServeTask,
which='serve')
sub = subs.add_parser(
'test',
parents=[base_subparser],
help="Runs tests on data in deployed models."
"Run this after `dbt run`")
sub.add_argument(
'--data',
action='store_true',
help='Run data tests defined in "tests" directory.'
)
sub.add_argument(
'--schema',
action='store_true',
help='Run constraint validations from schema.yml files'
)
sub.add_argument(
'--threads',
type=int,
required=False,
help="""
Specify number of threads to use while executing tests. Overrides
settings in profiles.yml
"""
)
sub.add_argument(
'-m',
'--models',
required=False,
nargs='+',
help="""
Specify the models to test.
"""
)
sub.add_argument(
'--exclude',
required=False,
nargs='+',
help="""
Specify the models to exclude from testing.
"""
)
sub.set_defaults(cls=test_task.TestTask, which='test')
if len(args) == 0:
p.print_help()
sys.exit(1)
parsed = p.parse_args(args)
parsed.profiles_dir = os.path.expanduser(parsed.profiles_dir)
if not hasattr(parsed, 'which'):
# the user did not provide a valid subcommand. trigger the help message
# and exit with a error
p.print_help()
p.exit(1)
return parsed
Print dbt version before every task is run
resolves fishtown-analytics/dbt#1134
from dbt.logger import initialize_logger, GLOBAL_LOGGER as logger, \
logger_initialized, log_cache_events
import argparse
import os.path
import sys
import traceback
import dbt.version
import dbt.flags as flags
import dbt.task.run as run_task
import dbt.task.compile as compile_task
import dbt.task.debug as debug_task
import dbt.task.clean as clean_task
import dbt.task.deps as deps_task
import dbt.task.init as init_task
import dbt.task.seed as seed_task
import dbt.task.test as test_task
import dbt.task.archive as archive_task
import dbt.task.generate as generate_task
import dbt.task.serve as serve_task
from dbt.adapters.factory import reset_adapters
import dbt.tracking
import dbt.ui.printer
import dbt.compat
import dbt.deprecations
import dbt.profiler
from dbt.utils import ExitCodes
from dbt.config import Project, UserConfig, RuntimeConfig, PROFILES_DIR, \
read_profiles
from dbt.exceptions import DbtProjectError, DbtProfileError, RuntimeException
PROFILES_HELP_MESSAGE = """
For more information on configuring profiles, please consult the dbt docs:
https://docs.getdbt.com/docs/configure-your-profile
"""
class DBTVersion(argparse.Action):
"""This is very very similar to the builtin argparse._Version action,
except it just calls dbt.version.get_version_information().
"""
def __init__(self,
option_strings,
version=None,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
help="show program's version number and exit"):
super(DBTVersion, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
formatter = parser._get_formatter()
formatter.add_text(dbt.version.get_version_information())
parser.exit(message=formatter.format_help())
class DBTArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
super(DBTArgumentParser, self).__init__(*args, **kwargs)
self.register('action', 'dbtversion', DBTVersion)
def main(args=None):
if args is None:
args = sys.argv[1:]
try:
results, succeeded = handle_and_check(args)
if succeeded:
exit_code = ExitCodes.Success
else:
exit_code = ExitCodes.ModelError
except KeyboardInterrupt as e:
logger.info("ctrl-c")
exit_code = ExitCodes.UnhandledError
# This can be thrown by eg. argparse
except SystemExit as e:
exit_code = e.code
except BaseException as e:
logger.info("Encountered an error:")
logger.info(str(e))
if logger_initialized():
logger.debug(traceback.format_exc())
elif not isinstance(e, RuntimeException):
# if it did not come from dbt proper and the logger is not
# initialized (so there's no safe path to log to), log the stack
# trace at error level.
logger.error(traceback.format_exc())
exit_code = ExitCodes.UnhandledError
sys.exit(exit_code)
# here for backwards compatibility
def handle(args):
res, success = handle_and_check(args)
return res
def initialize_config_values(parsed):
"""Given the parsed args, initialize the dbt tracking code.
It would be nice to re-use this profile later on instead of parsing it
twice, but dbt's intialization is not structured in a way that makes that
easy.
"""
try:
cfg = UserConfig.from_directory(parsed.profiles_dir)
except RuntimeException:
cfg = UserConfig.from_dict(None)
if cfg.send_anonymous_usage_stats:
dbt.tracking.initialize_tracking(parsed.profiles_dir)
else:
dbt.tracking.do_not_track()
if cfg.use_colors:
dbt.ui.printer.use_colors()
def handle_and_check(args):
parsed = parse_args(args)
profiler_enabled = False
if parsed.record_timing_info:
profiler_enabled = True
with dbt.profiler.profiler(
enable=profiler_enabled,
outfile=parsed.record_timing_info
):
initialize_config_values(parsed)
reset_adapters()
try:
task, res = run_from_args(parsed)
finally:
dbt.tracking.flush()
success = task.interpret_results(res)
return res, success
def get_nearest_project_dir():
root_path = os.path.abspath(os.sep)
cwd = os.getcwd()
while cwd != root_path:
project_file = os.path.join(cwd, "dbt_project.yml")
if os.path.exists(project_file):
return cwd
cwd = os.path.dirname(cwd)
return None
def run_from_args(parsed):
task = None
cfg = None
if parsed.which in ('init', 'debug'):
# bypass looking for a project file if we're running `dbt init` or
# `dbt debug`
task = parsed.cls(args=parsed)
else:
nearest_project_dir = get_nearest_project_dir()
if nearest_project_dir is None:
raise RuntimeException(
"fatal: Not a dbt project (or any of the parent directories). "
"Missing dbt_project.yml file"
)
os.chdir(nearest_project_dir)
res = invoke_dbt(parsed)
if res is None:
raise RuntimeException("Could not run dbt")
else:
task, cfg = res
log_path = None
if cfg is not None:
log_path = cfg.log_path
initialize_logger(parsed.debug, log_path)
logger.debug("Tracking: {}".format(dbt.tracking.active_user.state()))
dbt.tracking.track_invocation_start(config=cfg, args=parsed)
results = run_from_task(task, cfg, parsed)
return task, results
def run_from_task(task, cfg, parsed_args):
result = None
try:
result = task.run()
dbt.tracking.track_invocation_end(
config=cfg, args=parsed_args, result_type="ok"
)
except (dbt.exceptions.NotImplementedException,
dbt.exceptions.FailedToConnectException) as e:
logger.info('ERROR: {}'.format(e))
dbt.tracking.track_invocation_end(
config=cfg, args=parsed_args, result_type="error"
)
except Exception as e:
dbt.tracking.track_invocation_end(
config=cfg, args=parsed_args, result_type="error"
)
raise
return result
def invoke_dbt(parsed):
task = None
cfg = None
log_cache_events(getattr(parsed, 'log_cache_events', False))
logger.info("Running with dbt{}".format(dbt.version.installed))
try:
if parsed.which in {'deps', 'clean'}:
# deps doesn't need a profile, so don't require one.
cfg = Project.from_current_directory(getattr(parsed, 'vars', '{}'))
elif parsed.which != 'debug':
# for debug, we will attempt to load the various configurations as
# part of the task, so just leave cfg=None.
cfg = RuntimeConfig.from_args(parsed)
except DbtProjectError as e:
logger.info("Encountered an error while reading the project:")
logger.info(dbt.compat.to_string(e))
dbt.tracking.track_invalid_invocation(
config=cfg,
args=parsed,
result_type=e.result_type)
return None
except DbtProfileError as e:
logger.info("Encountered an error while reading profiles:")
logger.info(" ERROR {}".format(str(e)))
all_profiles = read_profiles(parsed.profiles_dir).keys()
if len(all_profiles) > 0:
logger.info("Defined profiles:")
for profile in all_profiles:
logger.info(" - {}".format(profile))
else:
logger.info("There are no profiles defined in your "
"profiles.yml file")
logger.info(PROFILES_HELP_MESSAGE)
dbt.tracking.track_invalid_invocation(
config=cfg,
args=parsed,
result_type=e.result_type)
return None
flags.NON_DESTRUCTIVE = getattr(parsed, 'non_destructive', False)
flags.USE_CACHE = getattr(parsed, 'use_cache', True)
arg_drop_existing = getattr(parsed, 'drop_existing', False)
arg_full_refresh = getattr(parsed, 'full_refresh', False)
if arg_drop_existing:
dbt.deprecations.warn('drop-existing')
flags.FULL_REFRESH = True
elif arg_full_refresh:
flags.FULL_REFRESH = True
logger.debug("running dbt with arguments %s", parsed)
task = parsed.cls(args=parsed, config=cfg)
return task, cfg
def parse_args(args):
p = DBTArgumentParser(
prog='dbt: data build tool',
formatter_class=argparse.RawTextHelpFormatter,
description="An ELT tool for managing your SQL "
"transformations and data models."
"\nFor more documentation on these commands, visit: "
"docs.getdbt.com",
epilog="Specify one of these sub-commands and you can "
"find more help from there.")
p.add_argument(
'--version',
action='dbtversion',
help="Show version information")
p.add_argument(
'-r',
'--record-timing-info',
default=None,
type=str,
help="""
When this option is passed, dbt will output low-level timing
stats to the specified file. Example:
`--record-timing-info output.profile`
"""
)
p.add_argument(
'-d',
'--debug',
action='store_true',
help='''Display debug logging during dbt execution. Useful for
debugging and making bug reports.''')
p.add_argument(
'-S',
'--strict',
action='store_true',
help='''Run schema validations at runtime. This will surface
bugs in dbt, but may incur a performance penalty.''')
# if set, run dbt in single-threaded mode: thread count is ignored, and
# calls go through `map` instead of the thread pool. This is useful for
# getting performance information about aspects of dbt that normally run in
# a thread, as the profiler ignores child threads. Users should really
# never use this.
p.add_argument(
'--single-threaded',
action='store_true',
help=argparse.SUPPRESS,
)
subs = p.add_subparsers(title="Available sub-commands")
base_subparser = argparse.ArgumentParser(add_help=False)
base_subparser.add_argument(
'--profiles-dir',
default=PROFILES_DIR,
type=str,
help="""
Which directory to look in for the profiles.yml file. Default = {}
""".format(PROFILES_DIR)
)
base_subparser.add_argument(
'--profile',
required=False,
type=str,
help="""
Which profile to load. Overrides setting in dbt_project.yml.
"""
)
base_subparser.add_argument(
'--target',
default=None,
type=str,
help='Which target to load for the given profile'
)
base_subparser.add_argument(
'--vars',
type=str,
default='{}',
help="""
Supply variables to the project. This argument overrides
variables defined in your dbt_project.yml file. This argument
should be a YAML string, eg. '{my_variable: my_value}'"""
)
# if set, log all cache events. This is extremely verbose!
base_subparser.add_argument(
'--log-cache-events',
action='store_true',
help=argparse.SUPPRESS,
)
base_subparser.add_argument(
'--bypass-cache',
action='store_false',
dest='use_cache',
help='If set, bypass the adapter-level cache of database state',
)
sub = subs.add_parser(
'init',
parents=[base_subparser],
help="Initialize a new DBT project.")
sub.add_argument('project_name', type=str, help='Name of the new project')
sub.set_defaults(cls=init_task.InitTask, which='init')
sub = subs.add_parser(
'clean',
parents=[base_subparser],
help="Delete all folders in the clean-targets list"
"\n(usually the dbt_modules and target directories.)")
sub.set_defaults(cls=clean_task.CleanTask, which='clean')
sub = subs.add_parser(
'debug',
parents=[base_subparser],
help="Show some helpful information about dbt for debugging."
"\nNot to be confused with the --debug option which increases "
"verbosity.")
sub.add_argument(
'--config-dir',
action='store_true',
help="""
If specified, DBT will show path information for this project
"""
)
sub.set_defaults(cls=debug_task.DebugTask, which='debug')
sub = subs.add_parser(
'deps',
parents=[base_subparser],
help="Pull the most recent version of the dependencies "
"listed in packages.yml")
sub.set_defaults(cls=deps_task.DepsTask, which='deps')
sub = subs.add_parser(
'archive',
parents=[base_subparser],
help="Record changes to a mutable table over time."
"\nMust be configured in your dbt_project.yml.")
sub.add_argument(
'--threads',
type=int,
required=False,
help="""
Specify number of threads to use while archiving tables. Overrides
settings in profiles.yml.
"""
)
sub.set_defaults(cls=archive_task.ArchiveTask, which='archive')
run_sub = subs.add_parser(
'run',
parents=[base_subparser],
help="Compile SQL and execute against the current "
"target database.")
run_sub.set_defaults(cls=run_task.RunTask, which='run')
compile_sub = subs.add_parser(
'compile',
parents=[base_subparser],
help="Generates executable SQL from source model, test, and"
"analysis files. \nCompiled SQL files are written to the target/"
"directory.")
compile_sub.set_defaults(cls=compile_task.CompileTask, which='compile')
docs_sub = subs.add_parser(
'docs',
parents=[base_subparser],
help="Generate or serve the documentation "
"website for your project.")
docs_subs = docs_sub.add_subparsers()
# it might look like docs_sub is the correct parents entry, but that
# will cause weird errors about 'conflicting option strings'.
generate_sub = docs_subs.add_parser('generate', parents=[base_subparser])
generate_sub.set_defaults(cls=generate_task.GenerateTask,
which='generate')
generate_sub.add_argument(
'--no-compile',
action='store_false',
dest='compile',
help='Do not run "dbt compile" as part of docs generation'
)
for sub in [run_sub, compile_sub, generate_sub]:
sub.add_argument(
'-m',
'--models',
required=False,
nargs='+',
help="""
Specify the models to include.
"""
)
sub.add_argument(
'--exclude',
required=False,
nargs='+',
help="""
Specify the models to exclude.
"""
)
sub.add_argument(
'--threads',
type=int,
required=False,
help="""
Specify number of threads to use while executing models. Overrides
settings in profiles.yml.
"""
)
sub.add_argument(
'--non-destructive',
action='store_true',
help="""
If specified, DBT will not drop views. Tables will be truncated
instead of dropped.
"""
)
sub.add_argument(
'--full-refresh',
action='store_true',
help="""
If specified, DBT will drop incremental models and
fully-recalculate the incremental table from the model definition.
""")
seed_sub = subs.add_parser(
'seed',
parents=[base_subparser],
help="Load data from csv files into your data warehouse.")
seed_sub.add_argument(
'--drop-existing',
action='store_true',
help='(DEPRECATED) Use --full-refresh instead.'
)
seed_sub.add_argument(
'--full-refresh',
action='store_true',
help='Drop existing seed tables and recreate them'
)
seed_sub.add_argument(
'--show',
action='store_true',
help='Show a sample of the loaded data in the terminal'
)
seed_sub.set_defaults(cls=seed_task.SeedTask, which='seed')
serve_sub = docs_subs.add_parser('serve', parents=[base_subparser])
serve_sub.add_argument(
'--port',
default=8080,
type=int,
help='Specify the port number for the docs server.'
)
serve_sub.set_defaults(cls=serve_task.ServeTask,
which='serve')
sub = subs.add_parser(
'test',
parents=[base_subparser],
help="Runs tests on data in deployed models."
"Run this after `dbt run`")
sub.add_argument(
'--data',
action='store_true',
help='Run data tests defined in "tests" directory.'
)
sub.add_argument(
'--schema',
action='store_true',
help='Run constraint validations from schema.yml files'
)
sub.add_argument(
'--threads',
type=int,
required=False,
help="""
Specify number of threads to use while executing tests. Overrides
settings in profiles.yml
"""
)
sub.add_argument(
'-m',
'--models',
required=False,
nargs='+',
help="""
Specify the models to test.
"""
)
sub.add_argument(
'--exclude',
required=False,
nargs='+',
help="""
Specify the models to exclude from testing.
"""
)
sub.set_defaults(cls=test_task.TestTask, which='test')
if len(args) == 0:
p.print_help()
sys.exit(1)
parsed = p.parse_args(args)
parsed.profiles_dir = os.path.expanduser(parsed.profiles_dir)
if not hasattr(parsed, 'which'):
# the user did not provide a valid subcommand. trigger the help message
# and exit with a error
p.print_help()
p.exit(1)
return parsed
|
#!/usr/bin/python
"""
/**
This software was developed by Institut Laue-Langevin as part of
Distributed Data Analysis of Neutron Scattering Experiments (DANSE).
Copyright 2012 Institut Laue-Langevin
**/
"""
import wx
import sys
import os
from wx.lib.mixins.listctrl import CheckListCtrlMixin, ListCtrlAutoWidthMixin
from collections import defaultdict
import cPickle as pickle
from sans.guiframe.events import ChangeCategoryEvent
from sans.guiframe.CategoryInstaller import CategoryInstaller
""" Notes
The category manager mechanism works from 3 data structures used:
- self.master_category_dict: keys are the names of categories,
the values are lists of tuples,
the first being the model names (the models belonging to that
category), the second a boolean
of whether or not the model is enabled
- self.by_model_dict: keys are model names, values are a list
of categories belonging to that model
- self.model_enabled_dict: keys are model names, values are
bools of whether the model is enabled
use self._regenerate_model_dict() to create the latter two
structures from the former
use self._regenerate_master_dict() to create the first
structure from the latter two
The need for so many data structures comes from the fact
sometimes we need fast access
to all the models in a category (eg user selection from the gui)
and sometimes we need access to all the categories
corresponding to a model (eg user modification of model categories)
"""
class CheckListCtrl(wx.ListCtrl, CheckListCtrlMixin,
ListCtrlAutoWidthMixin):
"""
Taken from
http://zetcode.com/wxpython/advanced/
"""
def __init__(self, parent, callback_func):
"""
Initialization
:param parent: Parent window
:param callback_func: A function to be called when
an element is clicked
"""
wx.ListCtrl.__init__(self, parent, -1, style=wx.LC_REPORT \
| wx.SUNKEN_BORDER)
CheckListCtrlMixin.__init__(self)
ListCtrlAutoWidthMixin.__init__(self)
self.callback_func = callback_func
def OnCheckItem(self, index, flag):
"""
When the user checks the item we need to save that state
"""
self.callback_func(index, flag)
class CategoryManager(wx.Frame):
"""
A class for managing categories
"""
def __init__(self, parent, win_id, title):
"""
Category Manager Dialog class
:param win_id: A new wx ID
:param title: Title for the window
"""
# make sure the category file is where it should be
self.performance_blocking = False
self.master_category_dict = defaultdict(list)
self.by_model_dict = defaultdict(list)
self.model_enabled_dict = defaultdict(bool)
wx.Frame.__init__(self, parent, win_id, title, size=(650, 400))
panel = wx.Panel(self, -1)
self.parent = parent
self._read_category_info()
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
left_panel = wx.Panel(panel, -1)
right_panel = wx.Panel(panel, -1)
self.cat_list = CheckListCtrl(right_panel, self._on_check)
self.cat_list.InsertColumn(0, 'Model', width = 280)
self.cat_list.InsertColumn(1, 'Category', width = 240)
self._fill_lists()
self._regenerate_model_dict()
self._set_enabled()
vbox2 = wx.BoxSizer(wx.VERTICAL)
sel = wx.Button(left_panel, -1, 'Enable All', size=(100, -1))
des = wx.Button(left_panel, -1, 'Disable All', size=(100, -1))
modify_button = wx.Button(left_panel, -1, 'Modify',
size=(100, -1))
ok_button = wx.Button(left_panel, -1, 'OK', size=(100, -1))
cancel_button = wx.Button(left_panel, -1, 'Cancel',
size=(100, -1))
self.Bind(wx.EVT_BUTTON, self._on_selectall,
id=sel.GetId())
self.Bind(wx.EVT_BUTTON, self._on_deselectall,
id=des.GetId())
self.Bind(wx.EVT_BUTTON, self._on_apply,
id = modify_button.GetId())
self.Bind(wx.EVT_BUTTON, self._on_ok,
id = ok_button.GetId())
self.Bind(wx.EVT_BUTTON, self._on_cancel,
id = cancel_button.GetId())
vbox2.Add(modify_button, 0, wx.TOP, 10)
vbox2.Add((-1, 20))
vbox2.Add(sel)
vbox2.Add(des)
vbox2.Add((-1, 20))
vbox2.Add(ok_button)
vbox2.Add(cancel_button)
left_panel.SetSizer(vbox2)
vbox.Add(self.cat_list, 1, wx.EXPAND | wx.TOP, 3)
vbox.Add((-1, 10))
right_panel.SetSizer(vbox)
hbox.Add(left_panel, 0, wx.EXPAND | wx.RIGHT, 5)
hbox.Add(right_panel, 1, wx.EXPAND)
hbox.Add((3, -1))
panel.SetSizer(hbox)
self.performance_blocking = True
self.Centre()
self.Show(True)
# gui stuff finished
def _on_check(self, index, flag):
"""
When the user checks an item we need to immediately save that state.
:param index: The index of the checked item
:param flag: True or False whether the item was checked
"""
if self.performance_blocking:
# for computational reasons we don't want to
# call this function every time the gui is set up
model_name = self.cat_list.GetItem(index, 0).GetText()
self.model_enabled_dict[model_name] = flag
self._regenerate_master_dict()
def _fill_lists(self):
"""
Expands lists on the GUI
"""
self.cat_list.DeleteAllItems()
model_name_list = [model for model in self.by_model_dict]
model_name_list.sort()
for model in model_name_list:
index = self.cat_list.InsertStringItem(sys.maxint, model)
self.cat_list.SetStringItem(index, 1, \
str(self.by_model_dict[model]).\
replace("'","").\
replace("[","").\
replace("]",""))
def _set_enabled(self):
"""
Updates enabled models from self.model_enabled_dict
"""
num = self.cat_list.GetItemCount()
for i in range(num):
model_name = self.cat_list.GetItem(i, 0).GetText()
self.cat_list.CheckItem(i,
self.model_enabled_dict[model_name] )
def _on_selectall(self, event):
"""
Callback for 'enable all'
"""
self.performance_blocking = False
num = self.cat_list.GetItemCount()
for i in range(num):
self.cat_list.CheckItem(i)
for model in self.model_enabled_dict:
self.model_enabled_dict[model] = True
self._regenerate_master_dict()
self.performance_blocking = True
def _on_deselectall(self, event):
"""
Callback for 'disable all'
"""
self.performance_blocking = False
num = self.cat_list.GetItemCount()
for i in range(num):
self.cat_list.CheckItem(i, False)
for model in self.model_enabled_dict:
self.model_enabled_dict[model] = False
self._regenerate_master_dict()
self.performance_blocking = True
def _on_apply(self, event):
"""
Call up the 'ChangeCat' dialog for category editing
"""
if self.cat_list.GetSelectedItemCount() == 0:
wx.MessageBox('Please select a model', 'Error',
wx.OK | wx.ICON_EXCLAMATION )
else:
selected_model = \
self.cat_list.GetItem(\
self.cat_list.GetFirstSelected(), 0).GetText()
modify_dialog = ChangeCat(self, 'Change Category: ' + \
selected_model,
self._get_cat_list(),
self.by_model_dict[selected_model])
if modify_dialog.ShowModal() == wx.ID_OK:
self.by_model_dict[selected_model] = \
modify_dialog.get_category()
self._regenerate_master_dict()
self._fill_lists()
self._set_enabled()
def _on_ok(self, event):
"""
Close the manager
"""
self._save_state()
evt = ChangeCategoryEvent()
wx.PostEvent(self.parent, evt)
self.Destroy()
def _on_cancel(self, event):
"""
On cancel
"""
self.Destroy()
def _save_state(self):
"""
Serializes categorization info to file
"""
self._regenerate_master_dict()
cat_file = open(CategoryInstaller.get_user_file(), 'wb')
pickle.dump( self.master_category_dict, cat_file )
def _read_category_info(self):
"""
Read in categorization info from file
"""
try:
file = CategoryInstaller.get_user_file()
if os.path.isfile(file):
cat_file = open(file, 'rb')
self.master_category_dict = pickle.load(cat_file)
else:
cat_file = open(CategoryInstaller.get_default_file(), 'rb')
self.master_category_dict = pickle.load(cat_file)
except IOError:
print 'Problem reading in category file. Please review'
self._regenerate_model_dict()
def _get_cat_list(self):
"""
Returns a simple list of categories
"""
cat_list = list()
for category in self.master_category_dict.iterkeys():
if not category == 'Uncategorized':
cat_list.append(category)
return cat_list
def _regenerate_model_dict(self):
"""
regenerates self.by_model_dict which has each model
name as the key
and the list of categories belonging to that model
along with the enabled mapping
"""
self.by_model_dict = defaultdict(list)
for category in self.master_category_dict:
for (model, enabled) in self.master_category_dict[category]:
self.by_model_dict[model].append(category)
self.model_enabled_dict[model] = enabled
def _regenerate_master_dict(self):
"""
regenerates self.master_category_dict from
self.by_model_dict and self.model_enabled_dict
"""
self.master_category_dict = defaultdict(list)
for model in self.by_model_dict:
for category in self.by_model_dict[model]:
self.master_category_dict[category].append\
((model, self.model_enabled_dict[model]))
class ChangeCat(wx.Dialog):
"""
dialog for changing the categories of a model
"""
def __init__(self, parent, title, cat_list, current_cats):
"""
Actual editor for a certain category
:param parent: Window parent
:param title: Window title
:param cat_list: List of all categories
:param current_cats: List of categories applied to current model
"""
wx.Dialog.__init__(self, parent, title = title, size=(485, 425))
self.current_cats = current_cats
if str(self.current_cats[0]) == 'Uncategorized':
self.current_cats = []
vbox = wx.BoxSizer(wx.VERTICAL)
self.add_sb = wx.StaticBox(self, label = "Add Category")
self.add_sb_sizer = wx.StaticBoxSizer(self.add_sb, wx.VERTICAL)
gs = wx.GridSizer(3, 2, 5, 5)
self.cat_list = cat_list
self.cat_text = wx.StaticText(self, label = "Current categories: ")
self.current_categories = wx.ListBox(self,
choices = self.current_cats
, size=(300, 100))
self.existing_check = wx.RadioButton(self,
label = 'Choose Existing')
self.new_check = wx.RadioButton(self, label = 'Create new')
self.exist_combo = wx.ComboBox(self, style = wx.CB_READONLY,
size=(220,-1), choices = cat_list)
self.exist_combo.SetSelection(0)
self.remove_sb = wx.StaticBox(self, label = "Remove Category")
self.remove_sb_sizer = wx.StaticBoxSizer(self.remove_sb,
wx.VERTICAL)
self.new_text = wx.TextCtrl(self, size=(220, -1))
self.ok_button = wx.Button(self, wx.ID_OK, "Done")
self.add_button = wx.Button(self, label = "Add")
self.add_button.Bind(wx.EVT_BUTTON, self.on_add)
self.remove_button = wx.Button(self, label = "Remove Selected")
self.remove_button.Bind(wx.EVT_BUTTON, self.on_remove)
self.existing_check.Bind(wx.EVT_RADIOBUTTON, self.on_existing)
self.new_check.Bind(wx.EVT_RADIOBUTTON, self.on_newcat)
self.existing_check.SetValue(True)
vbox.Add(self.cat_text, flag = wx.LEFT | wx.TOP | wx.ALIGN_LEFT,
border = 10)
vbox.Add(self.current_categories, flag = wx.ALL | wx.EXPAND,
border = 10 )
gs.AddMany( [ (self.existing_check, 5, wx.ALL),
(self.exist_combo, 5, wx.ALL),
(self.new_check, 5, wx.ALL),
(self.new_text, 5, wx.ALL ),
((-1,-1)),
(self.add_button, 5, wx.ALL | wx.ALIGN_RIGHT) ] )
self.add_sb_sizer.Add(gs, proportion = 1, flag = wx.ALL, border = 5)
vbox.Add(self.add_sb_sizer, flag = wx.ALL | wx.EXPAND, border = 10)
self.remove_sb_sizer.Add(self.remove_button, border = 5,
flag = wx.ALL | wx.ALIGN_RIGHT)
vbox.Add(self.remove_sb_sizer,
flag = wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND,
border = 10)
vbox.Add(self.ok_button, flag = wx.ALL | wx.ALIGN_RIGHT,
border = 10)
if self.current_categories.GetCount() > 0:
self.current_categories.SetSelection(0)
self.new_text.Disable()
self.SetSizer(vbox)
self.Centre()
self.Show(True)
def on_add(self, event):
"""
Callback for new category added
"""
new_cat = ''
if self.existing_check.GetValue():
new_cat = str(self.exist_combo.GetValue())
else:
new_cat = str(self.new_text.GetValue())
if new_cat in self.cat_list:
wx.MessageBox('%s is already a model' % new_cat, 'Error',
wx.OK | wx.ICON_EXCLAMATION )
return
if new_cat in self.current_cats:
wx.MessageBox('%s is already included in this model' \
% new_cat, 'Error',
wx.OK | wx.ICON_EXCLAMATION )
return
self.current_cats.append(new_cat)
self.current_categories.SetItems(self.current_cats)
def on_remove(self, event):
"""
Callback for a category removed
"""
if self.current_categories.GetSelection() == wx.NOT_FOUND:
wx.MessageBox('Please select a category to remove', 'Error',
wx.OK | wx.ICON_EXCLAMATION )
else:
self.current_categories.Delete( \
self.current_categories.GetSelection())
self.current_cats = self.current_categories.GetItems()
def on_newcat(self, event):
"""
Callback for new category added
"""
self.new_text.Enable()
self.exist_combo.Disable()
def on_existing(self, event):
"""
Callback for existing category selected
"""
self.new_text.Disable()
self.exist_combo.Enable()
def get_category(self):
"""
Returns a list of categories applying to this model
"""
if not self.current_cats:
self.current_cats.append("Uncategorized")
ret = list()
for cat in self.current_cats:
ret.append(str(cat))
return ret
if __name__ == '__main__':
if(len(sys.argv) > 1):
app = wx.App()
CategoryManager(None, -1, 'Category Manager', sys.argv[1])
app.MainLoop()
else:
app = wx.App()
CategoryManager(None, -1, 'Category Manager', sys.argv[1])
app.MainLoop()
MAC: fixed the remove in cat dial
#!/usr/bin/python
"""
/**
This software was developed by Institut Laue-Langevin as part of
Distributed Data Analysis of Neutron Scattering Experiments (DANSE).
Copyright 2012 Institut Laue-Langevin
**/
"""
import wx
import sys
import os
from wx.lib.mixins.listctrl import CheckListCtrlMixin, ListCtrlAutoWidthMixin
from collections import defaultdict
import cPickle as pickle
from sans.guiframe.events import ChangeCategoryEvent
from sans.guiframe.CategoryInstaller import CategoryInstaller
IS_MAC = (sys.platform == 'darwin')
""" Notes
The category manager mechanism works from 3 data structures used:
- self.master_category_dict: keys are the names of categories,
the values are lists of tuples,
the first being the model names (the models belonging to that
category), the second a boolean
of whether or not the model is enabled
- self.by_model_dict: keys are model names, values are a list
of categories belonging to that model
- self.model_enabled_dict: keys are model names, values are
bools of whether the model is enabled
use self._regenerate_model_dict() to create the latter two
structures from the former
use self._regenerate_master_dict() to create the first
structure from the latter two
The need for so many data structures comes from the fact
sometimes we need fast access
to all the models in a category (eg user selection from the gui)
and sometimes we need access to all the categories
corresponding to a model (eg user modification of model categories)
"""
class CheckListCtrl(wx.ListCtrl, CheckListCtrlMixin,
ListCtrlAutoWidthMixin):
"""
Taken from
http://zetcode.com/wxpython/advanced/
"""
def __init__(self, parent, callback_func):
"""
Initialization
:param parent: Parent window
:param callback_func: A function to be called when
an element is clicked
"""
wx.ListCtrl.__init__(self, parent, -1, style=wx.LC_REPORT \
| wx.SUNKEN_BORDER)
CheckListCtrlMixin.__init__(self)
ListCtrlAutoWidthMixin.__init__(self)
self.callback_func = callback_func
def OnCheckItem(self, index, flag):
"""
When the user checks the item we need to save that state
"""
self.callback_func(index, flag)
class CategoryManager(wx.Frame):
"""
A class for managing categories
"""
def __init__(self, parent, win_id, title):
"""
Category Manager Dialog class
:param win_id: A new wx ID
:param title: Title for the window
"""
# make sure the category file is where it should be
self.performance_blocking = False
self.master_category_dict = defaultdict(list)
self.by_model_dict = defaultdict(list)
self.model_enabled_dict = defaultdict(bool)
wx.Frame.__init__(self, parent, win_id, title, size=(650, 400))
panel = wx.Panel(self, -1)
self.parent = parent
self._read_category_info()
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
left_panel = wx.Panel(panel, -1)
right_panel = wx.Panel(panel, -1)
self.cat_list = CheckListCtrl(right_panel, self._on_check)
self.cat_list.InsertColumn(0, 'Model', width = 280)
self.cat_list.InsertColumn(1, 'Category', width = 240)
self._fill_lists()
self._regenerate_model_dict()
self._set_enabled()
vbox2 = wx.BoxSizer(wx.VERTICAL)
sel = wx.Button(left_panel, -1, 'Enable All', size=(100, -1))
des = wx.Button(left_panel, -1, 'Disable All', size=(100, -1))
modify_button = wx.Button(left_panel, -1, 'Modify',
size=(100, -1))
ok_button = wx.Button(left_panel, -1, 'OK', size=(100, -1))
cancel_button = wx.Button(left_panel, -1, 'Cancel',
size=(100, -1))
self.Bind(wx.EVT_BUTTON, self._on_selectall,
id=sel.GetId())
self.Bind(wx.EVT_BUTTON, self._on_deselectall,
id=des.GetId())
self.Bind(wx.EVT_BUTTON, self._on_apply,
id = modify_button.GetId())
self.Bind(wx.EVT_BUTTON, self._on_ok,
id = ok_button.GetId())
self.Bind(wx.EVT_BUTTON, self._on_cancel,
id = cancel_button.GetId())
vbox2.Add(modify_button, 0, wx.TOP, 10)
vbox2.Add((-1, 20))
vbox2.Add(sel)
vbox2.Add(des)
vbox2.Add((-1, 20))
vbox2.Add(ok_button)
vbox2.Add(cancel_button)
left_panel.SetSizer(vbox2)
vbox.Add(self.cat_list, 1, wx.EXPAND | wx.TOP, 3)
vbox.Add((-1, 10))
right_panel.SetSizer(vbox)
hbox.Add(left_panel, 0, wx.EXPAND | wx.RIGHT, 5)
hbox.Add(right_panel, 1, wx.EXPAND)
hbox.Add((3, -1))
panel.SetSizer(hbox)
self.performance_blocking = True
self.Centre()
self.Show(True)
# gui stuff finished
def _on_check(self, index, flag):
"""
When the user checks an item we need to immediately save that state.
:param index: The index of the checked item
:param flag: True or False whether the item was checked
"""
if self.performance_blocking:
# for computational reasons we don't want to
# call this function every time the gui is set up
model_name = self.cat_list.GetItem(index, 0).GetText()
self.model_enabled_dict[model_name] = flag
self._regenerate_master_dict()
def _fill_lists(self):
"""
Expands lists on the GUI
"""
self.cat_list.DeleteAllItems()
model_name_list = [model for model in self.by_model_dict]
model_name_list.sort()
for model in model_name_list:
index = self.cat_list.InsertStringItem(sys.maxint, model)
self.cat_list.SetStringItem(index, 1, \
str(self.by_model_dict[model]).\
replace("'","").\
replace("[","").\
replace("]",""))
def _set_enabled(self):
"""
Updates enabled models from self.model_enabled_dict
"""
num = self.cat_list.GetItemCount()
for i in range(num):
model_name = self.cat_list.GetItem(i, 0).GetText()
self.cat_list.CheckItem(i,
self.model_enabled_dict[model_name] )
def _on_selectall(self, event):
"""
Callback for 'enable all'
"""
self.performance_blocking = False
num = self.cat_list.GetItemCount()
for i in range(num):
self.cat_list.CheckItem(i)
for model in self.model_enabled_dict:
self.model_enabled_dict[model] = True
self._regenerate_master_dict()
self.performance_blocking = True
def _on_deselectall(self, event):
"""
Callback for 'disable all'
"""
self.performance_blocking = False
num = self.cat_list.GetItemCount()
for i in range(num):
self.cat_list.CheckItem(i, False)
for model in self.model_enabled_dict:
self.model_enabled_dict[model] = False
self._regenerate_master_dict()
self.performance_blocking = True
def _on_apply(self, event):
"""
Call up the 'ChangeCat' dialog for category editing
"""
if self.cat_list.GetSelectedItemCount() == 0:
wx.MessageBox('Please select a model', 'Error',
wx.OK | wx.ICON_EXCLAMATION )
else:
selected_model = \
self.cat_list.GetItem(\
self.cat_list.GetFirstSelected(), 0).GetText()
modify_dialog = ChangeCat(self, selected_model,
self._get_cat_list(),
self.by_model_dict[selected_model])
if modify_dialog.ShowModal() == wx.ID_OK:
if not IS_MAC:
self.dial_ok(modify_dialog, selected_model)
def dial_ok(self, dialog=None, model=None):
"""
modify_dialog onclose
"""
self.by_model_dict[model] = dialog.get_category()
self._regenerate_master_dict()
self._fill_lists()
self._set_enabled()
def _on_ok(self, event):
"""
Close the manager
"""
self._save_state()
evt = ChangeCategoryEvent()
wx.PostEvent(self.parent, evt)
self.Destroy()
def _on_cancel(self, event):
"""
On cancel
"""
self.Destroy()
def _save_state(self):
"""
Serializes categorization info to file
"""
self._regenerate_master_dict()
cat_file = open(CategoryInstaller.get_user_file(), 'wb')
pickle.dump( self.master_category_dict, cat_file )
def _read_category_info(self):
"""
Read in categorization info from file
"""
try:
file = CategoryInstaller.get_user_file()
if os.path.isfile(file):
cat_file = open(file, 'rb')
self.master_category_dict = pickle.load(cat_file)
else:
cat_file = open(CategoryInstaller.get_default_file(), 'rb')
self.master_category_dict = pickle.load(cat_file)
except IOError:
print 'Problem reading in category file. Please review'
self._regenerate_model_dict()
def _get_cat_list(self):
"""
Returns a simple list of categories
"""
cat_list = list()
for category in self.master_category_dict.iterkeys():
if not category == 'Uncategorized':
cat_list.append(category)
return cat_list
def _regenerate_model_dict(self):
"""
regenerates self.by_model_dict which has each model
name as the key
and the list of categories belonging to that model
along with the enabled mapping
"""
self.by_model_dict = defaultdict(list)
for category in self.master_category_dict:
for (model, enabled) in self.master_category_dict[category]:
self.by_model_dict[model].append(category)
self.model_enabled_dict[model] = enabled
def _regenerate_master_dict(self):
"""
regenerates self.master_category_dict from
self.by_model_dict and self.model_enabled_dict
"""
self.master_category_dict = defaultdict(list)
for model in self.by_model_dict:
for category in self.by_model_dict[model]:
self.master_category_dict[category].append\
((model, self.model_enabled_dict[model]))
class ChangeCat(wx.Dialog):
"""
dialog for changing the categories of a model
"""
def __init__(self, parent, title, cat_list, current_cats):
"""
Actual editor for a certain category
:param parent: Window parent
:param title: Window title
:param cat_list: List of all categories
:param current_cats: List of categories applied to current model
"""
wx.Dialog.__init__(self, parent, title = 'Change Category: '+title, size=(485, 425))
self.current_cats = current_cats
if str(self.current_cats[0]) == 'Uncategorized':
self.current_cats = []
self.parent = parent
self.selcted_model = title
vbox = wx.BoxSizer(wx.VERTICAL)
self.add_sb = wx.StaticBox(self, label = "Add Category")
self.add_sb_sizer = wx.StaticBoxSizer(self.add_sb, wx.VERTICAL)
gs = wx.GridSizer(3, 2, 5, 5)
self.cat_list = cat_list
self.cat_text = wx.StaticText(self, label = "Current categories: ")
self.current_categories = wx.ListBox(self,
choices = self.current_cats
, size=(300, 100))
self.existing_check = wx.RadioButton(self,
label = 'Choose Existing')
self.new_check = wx.RadioButton(self, label = 'Create new')
self.exist_combo = wx.ComboBox(self, style = wx.CB_READONLY,
size=(220,-1), choices = cat_list)
self.exist_combo.SetSelection(0)
self.remove_sb = wx.StaticBox(self, label = "Remove Category")
self.remove_sb_sizer = wx.StaticBoxSizer(self.remove_sb,
wx.VERTICAL)
self.new_text = wx.TextCtrl(self, size=(220, -1))
self.ok_button = wx.Button(self, wx.ID_OK, "Done")
self.add_button = wx.Button(self, label = "Add")
self.add_button.Bind(wx.EVT_BUTTON, self.on_add)
self.remove_button = wx.Button(self, label = "Remove Selected")
self.remove_button.Bind(wx.EVT_BUTTON, self.on_remove)
self.existing_check.Bind(wx.EVT_RADIOBUTTON, self.on_existing)
self.new_check.Bind(wx.EVT_RADIOBUTTON, self.on_newcat)
self.existing_check.SetValue(True)
vbox.Add(self.cat_text, flag = wx.LEFT | wx.TOP | wx.ALIGN_LEFT,
border = 10)
vbox.Add(self.current_categories, flag = wx.ALL | wx.EXPAND,
border = 10 )
gs.AddMany( [ (self.existing_check, 5, wx.ALL),
(self.exist_combo, 5, wx.ALL),
(self.new_check, 5, wx.ALL),
(self.new_text, 5, wx.ALL ),
((-1,-1)),
(self.add_button, 5, wx.ALL | wx.ALIGN_RIGHT) ] )
self.add_sb_sizer.Add(gs, proportion = 1, flag = wx.ALL, border = 5)
vbox.Add(self.add_sb_sizer, flag = wx.ALL | wx.EXPAND, border = 10)
self.remove_sb_sizer.Add(self.remove_button, border = 5,
flag = wx.ALL | wx.ALIGN_RIGHT)
vbox.Add(self.remove_sb_sizer,
flag = wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND,
border = 10)
vbox.Add(self.ok_button, flag = wx.ALL | wx.ALIGN_RIGHT,
border = 10)
if self.current_categories.GetCount() > 0:
self.current_categories.SetSelection(0)
self.new_text.Disable()
self.SetSizer(vbox)
self.Centre()
self.Show(True)
if IS_MAC:
self.ok_button.Bind(wx.EVT_BUTTON, self.on_ok_mac)
def on_ok_mac(self, event):
"""
On OK pressed (MAC only)
"""
event.Skip()
self.parent.dial_ok(self, self.selcted_model)
self.Destroy()
def on_add(self, event):
"""
Callback for new category added
"""
new_cat = ''
if self.existing_check.GetValue():
new_cat = str(self.exist_combo.GetValue())
else:
new_cat = str(self.new_text.GetValue())
if new_cat in self.cat_list:
wx.MessageBox('%s is already a model' % new_cat, 'Error',
wx.OK | wx.ICON_EXCLAMATION )
return
if new_cat in self.current_cats:
wx.MessageBox('%s is already included in this model' \
% new_cat, 'Error',
wx.OK | wx.ICON_EXCLAMATION )
return
self.current_cats.append(new_cat)
self.current_categories.SetItems(self.current_cats)
def on_remove(self, event):
"""
Callback for a category removed
"""
if self.current_categories.GetSelection() == wx.NOT_FOUND:
wx.MessageBox('Please select a category to remove', 'Error',
wx.OK | wx.ICON_EXCLAMATION )
else:
self.current_categories.Delete( \
self.current_categories.GetSelection())
self.current_cats = self.current_categories.GetItems()
def on_newcat(self, event):
"""
Callback for new category added
"""
self.new_text.Enable()
self.exist_combo.Disable()
def on_existing(self, event):
"""
Callback for existing category selected
"""
self.new_text.Disable()
self.exist_combo.Enable()
def get_category(self):
"""
Returns a list of categories applying to this model
"""
if not self.current_cats:
self.current_cats.append("Uncategorized")
ret = list()
for cat in self.current_cats:
ret.append(str(cat))
return ret
if __name__ == '__main__':
if(len(sys.argv) > 1):
app = wx.App()
CategoryManager(None, -1, 'Category Manager', sys.argv[1])
app.MainLoop()
else:
app = wx.App()
CategoryManager(None, -1, 'Category Manager', sys.argv[1])
app.MainLoop()
|
#!/usr/bin/env python
""" WrapperGenerator class to generate model code automatically.
"""
import os, sys,re
import lineparser
class WrapperGenerator:
""" Python wrapper generator for C models
The developer must provide a header file describing
the new model.
To provide the name of the Python class to be
generated, the .h file must contain the following
string in the comments:
// [PYTHONCLASS] = my_model
where my_model must be replaced by the name of the
class that you want to import from sans.models.
(example: [PYTHONCLASS] = MyModel
will create a class MyModel in sans.models.MyModel.
It will also create a class CMyModel in
sans_extension.c_models.)
Also in comments, each parameter of the params
dictionary must be declared with a default value
in the following way:
// [DEFAULT]=param_name=default_value
(example:
// [DEFAULT]=radius=20.0
)
See cylinder.h for an example.
A .c file corresponding to the .h file should also
be provided (example: my_model.h, my_model.c).
The .h file should define two function definitions. For example,
cylinder.h defines the following:
/// 1D scattering function
double cylinder_analytical_1D(CylinderParameters *pars, double q);
/// 2D scattering function
double cylinder_analytical_2D(CylinderParameters *pars, double q, double phi);
The .c file implements those functions.
@author: Mathieu Doucet / UTK
@contact: mathieu.doucet@nist.gov
"""
def __init__(self, filename, output_dir='.', c_wrapper_dir='.'):
""" Initialization """
## Name of .h file to generate wrapper from
self.file = filename
# Info read from file
## Name of python class to write
self.pythonClass = None
## Parser in struct section
self.inStruct = False
self.foundCPP = False
self.inParDefs = False
## Name of struct for the c object
self.structName = None
## Dictionary of parameters
self.params = {}
## ModelCalculation module flag
self.modelCalcFlag = False
## List of default parameters (text)
self.default_list = ""
## Dictionary of units
self.details = ""
## List of dispersed parameters
self.disp_params = []
#model description
self.description=''
# paramaters for fittable
self.fixed= []
# paramaters for non-fittable
self.non_fittable= []
## parameters with orientation
self.orientation_params =[]
## parameter with magnetism
self.magentic_params = []
# Model category
self.category = None
# Whether model belongs to multifunc
self.is_multifunc = False
## output directory for wrappers
self.output_dir = output_dir
self.c_wrapper_dir = c_wrapper_dir
def __repr__(self):
""" Simple output for printing """
rep = "\n Python class: %s\n\n" % self.pythonClass
rep += " struc name: %s\n\n" % self.structName
rep += " params: %s\n\n" % self.params
rep += " description: %s\n\n" % self.description
rep += " Fittable parameters: %s\n\n"% self.fixed
rep += " Non-Fittable parameters: %s\n\n"% self.non_fittable
rep += " Orientation parameters: %s\n\n"% self.orientation_params
rep += " Magnetic parameters: %s\n\n"% self.magnetic_params
return rep
def read(self):
""" Reads in the .h file to catch parameters of the wrapper """
# Check if the file is there
if not os.path.isfile(self.file):
raise ValueError, "File %s is not a regular file" % self.file
# Read file
f = open(self.file,'r')
buf = f.read()
self.default_list = "List of default parameters:\n"
#lines = string.split(buf,'\n')
lines = buf.split('\n')
self.details = "## Parameter details [units, min, max]\n"
self.details += " self.details = {}\n"
#open item in this case Fixed
text='text'
key2="<%s>"%text.lower()
# close an item in this case fixed
text='TexT'
key3="</%s>"%text.lower()
## Catch fixed parameters
key = "[FIXED]"
try:
self.fixed= lineparser.readhelper(lines, key,
key2, key3, file=self.file)
except:
raise
## Catch non-fittable parameters parameters
key = "[NON_FITTABLE_PARAMS]"
try:
self.non_fittable= lineparser.readhelper(lines, key, key2,
key3, file=self.file)
except:
raise
## Catch parameters with orientation
key = "[ORIENTATION_PARAMS]"
try:
self.orientation_params = lineparser.readhelper(lines, key,
key2, key3, file=self.file)
except:
raise
## Catch parameters with orientation
key = "[MAGNETIC_PARAMS]"
try:
self.magnetic_params = lineparser.readhelper( lines,key,
key2,key3, file= self.file)
except:
raise
## Catch Description
key = "[DESCRIPTION]"
find_description = False
temp=""
for line in lines:
if line.count(key)>0 :
try:
find_description= True
index = line.index(key)
toks = line[index:].split("=",1 )
temp=toks[1].lstrip().rstrip()
text='text'
key2="<%s>"%text.lower()
if re.match(key2,temp)!=None:
toks2=temp.split(key2,1)
self.description=toks2[1]
text='text'
key2="</%s>"%text.lower()
if re.search(key2,toks2[1])!=None:
temp=toks2[1].split(key2,1)
self.description=temp[0]
break
else:
self.description=temp
break
except:
raise ValueError, "Could not parse file %s" % self.file
elif find_description:
text='text'
key2="</%s>"%text.lower()
if re.search(key2,line)!=None:
tok=line.split(key2,1)
temp=tok[0].split("//",1)
self.description+=tok[1].lstrip().rstrip()
break
else:
if re.search("//",line)!=None:
temp=line.split("//",1)
self.description+='\n\t\t'+temp[1].lstrip().rstrip()
else:
self.description+='\n\t\t'+line.lstrip().rstrip()
for line in lines:
# Catch class name
key = "[PYTHONCLASS]"
if line.count(key)>0:
try:
index = line.index(key)
toks = line[index:].split("=" )
self.pythonClass = toks[1].lstrip().rstrip()
except:
raise ValueError, "Could not parse file %s" % self.file
key = "[CATEGORY]"
if line.count(key)>0:
try:
index = line.index(key)
toks = line[index:].split("=")
self.category = toks[1].lstrip().rstrip()
except:
raise ValueError, "Could not parse file %s" % self.file
# is_multifunc
key = "[MULTIPLICITY_INFO]"
if line.count(key) > 0:
self.is_multifunc = True
try:
index = line.index(key)
toks = line[index:].split("=")
self.multiplicity_info = toks[1].lstrip().rstrip()
except:
raise ValueError, "Could not parse file %s" % self.file
# Catch struct name
# C++ class definition
if line.count("class")>0:
# We are entering a class definition
self.inParDefs = True
self.foundCPP = True
# Old-Style C struct definition
if line.count("typedef struct")>0:
# We are entering a struct block
self.inParDefs = True
self.inStruct = True
if self.inParDefs and line.count("}")>0:
# We are exiting a struct block
self.inParDefs = False
if self.inStruct:
self.inStruct = False
# Catch the name of the struct
index = line.index("}")
toks = line[index+1:].split(";")
# Catch pointer definition
toks2 = toks[0].split(',')
self.structName = toks2[0].lstrip().rstrip()
# Catch struct content
key = "[DEFAULT]"
if self.inParDefs and line.count(key)>0:
# Found a new parameter
try:
index = line.index(key)
toks = line[index:].split("=")
toks2 = toks[2].split()
val = float(toks2[0])
self.params[toks[1]] = val
#self.pythonClass = toks[1].lstrip().rstrip()
units = ""
if len(toks2) >= 2:
units = toks2[1]
self.default_list += " %-15s = %s %s\n" % \
(toks[1], val, units)
# Check for min and max
min = "None"
max = "None"
if len(toks2) == 4:
min = toks2[2]
max = toks2[3]
self.details += " self.details['%s'] = ['%s', %s, %s]\n" % \
(toks[1].lstrip().rstrip(), units.lstrip().rstrip(), min, max)
except:
raise ValueError, "Could not parse input file %s \n %s" % \
(self.file, sys.exc_value)
# Catch need for numerical calculations
key = "CalcParameters calcPars"
if line.count(key)>0:
self.modelCalcFlag = True
# Catch list of dispersed parameters
key = "[DISP_PARAMS]"
if line.count(key)>0:
try:
index = line.index(key)
toks = line[index:].split("=")
list_str = toks[1].lstrip().rstrip()
self.disp_params = list_str.split(',')
except:
raise ValueError, "Could not parse file %s" % self.file
def write_c_wrapper(self):
""" Writes the C file to create the python extension class
The file is written in C[PYTHONCLASS].c
"""
file_path = os.path.join(self.c_wrapper_dir,
"C"+self.pythonClass+'.cpp')
file = open(file_path, 'w')
template = open(os.path.join(os.path.dirname(__file__),
"classTemplate.txt"), 'r')
tmp_buf = template.read()
#tmp_lines = string.split(tmp_buf,'\n')
tmp_lines = tmp_buf.split('\n')
for tmp_line in tmp_lines:
# Catch class name
newline = self.replaceToken(tmp_line,
"[PYTHONCLASS]", 'C'+self.pythonClass)
#Catch model description
#newline = self.replaceToken(tmp_line,
# "[DESCRIPTION]", self.description)
# Catch C model name
newline = self.replaceToken(newline,
"[CMODEL]", self.pythonClass)
# Catch class name
newline = self.replaceToken(newline,
"[MODELSTRUCT]", self.structName)
# Sort model initialization based on multifunc
if(self.is_multifunc):
line = "int level = 1;\nPyArg_ParseTuple(args,\"i\",&level);\n"
line += "self->model = new " + self.pythonClass + "(level);"
else:
line = "self->model = new " + self.pythonClass + "();"
newline = self.replaceToken(newline,"[INITIALIZE_MODEL]",
line)
# Dictionary initialization
param_str = "// Initialize parameter dictionary\n"
for par in self.params:
param_str += " PyDict_SetItemString(self->params,\"%s\",Py_BuildValue(\"d\",%10.12f));\n" % \
(par, self.params[par])
if len(self.disp_params)>0:
param_str += " // Initialize dispersion / averaging parameter dict\n"
param_str += " DispersionVisitor* visitor = new DispersionVisitor();\n"
param_str += " PyObject * disp_dict;\n"
for par in self.disp_params:
par = par.strip()
param_str += " disp_dict = PyDict_New();\n"
param_str += " self->model->%s.dispersion->accept_as_source(visitor, self->model->%s.dispersion, disp_dict);\n" % (par, par)
param_str += " PyDict_SetItemString(self->dispersion, \"%s\", disp_dict);\n" % par
# Initialize dispersion object dictionnary
param_str += "\n"
newline = self.replaceToken(newline,
"[INITDICTIONARY]", param_str)
# Read dictionary
param_str = " // Reader parameter dictionary\n"
for par in self.params:
param_str += " self->model->%s = PyFloat_AsDouble( PyDict_GetItemString(self->params, \"%s\") );\n" % \
(par, par)
if len(self.disp_params)>0:
param_str += " // Read in dispersion parameters\n"
param_str += " PyObject* disp_dict;\n"
param_str += " DispersionVisitor* visitor = new DispersionVisitor();\n"
for par in self.disp_params:
par = par.strip()
param_str += " disp_dict = PyDict_GetItemString(self->dispersion, \"%s\");\n" % par
param_str += " self->model->%s.dispersion->accept_as_destination(visitor, self->model->%s.dispersion, disp_dict);\n" % (par, par)
newline = self.replaceToken(newline, "[READDICTIONARY]", param_str)
# Name of .c file
#toks = string.split(self.file,'.')
basename = os.path.basename(self.file)
toks = basename.split('.')
newline = self.replaceToken(newline, "[C_FILENAME]", toks[0])
# Include file
basename = os.path.basename(self.file)
newline = self.replaceToken(newline,
"[INCLUDE_FILE]", self.file)
if self.foundCPP:
newline = self.replaceToken(newline,
"[C_INCLUDE_FILE]", "")
newline = self.replaceToken(newline,
"[CPP_INCLUDE_FILE]", "#include \"%s\"" % basename)
else:
newline = self.replaceToken(newline,
"[C_INCLUDE_FILE]", "#include \"%s\"" % basename)
newline = self.replaceToken(newline,
"[CPP_INCLUDE_FILE]", "#include \"models.hh\"")
# Numerical calcs dealloc
dealloc_str = "\n"
if self.modelCalcFlag:
dealloc_str = " modelcalculations_dealloc(&(self->model_pars.calcPars));\n"
newline = self.replaceToken(newline,
"[NUMERICAL_DEALLOC]", dealloc_str)
# Numerical calcs init
init_str = "\n"
if self.modelCalcFlag:
init_str = " modelcalculations_init(&(self->model_pars.calcPars));\n"
newline = self.replaceToken(newline,
"[NUMERICAL_INIT]", init_str)
# Numerical calcs reset
reset_str = "\n"
if self.modelCalcFlag:
reset_str = "modelcalculations_reset(&(self->model_pars.calcPars));\n"
newline = self.replaceToken(newline,
"[NUMERICAL_RESET]", reset_str)
# Setting dispsertion weights
set_weights = " // Ugliness necessary to go from python to C\n"
set_weights = " // TODO: refactor this\n"
for par in self.disp_params:
par = par.strip()
set_weights += " if (!strcmp(par_name, \"%s\")) {\n" % par
set_weights += " self->model->%s.dispersion = dispersion;\n" % par
set_weights += " } else"
newline = self.replaceToken(newline,
"[SET_DISPERSION]", set_weights)
# Write new line to the wrapper .c file
file.write(newline+'\n')
file.close()
def write_python_wrapper(self):
""" Writes the python file to create the python extension class
The file is written in ../[PYTHONCLASS].py
"""
file_path = os.path.join(self.output_dir, self.pythonClass+'.py')
file = open(file_path, 'w')
template = open(os.path.join(os.path.dirname(__file__),
"modelTemplate.txt"), 'r')
tmp_buf = template.read()
tmp_lines = tmp_buf.split('\n')
for tmp_line in tmp_lines:
# Catch class name
newline = self.replaceToken(tmp_line,
"[CPYTHONCLASS]",
'C' + self.pythonClass)
# Catch class name
newline = self.replaceToken(newline,
"[PYTHONCLASS]", self.pythonClass)
# Include file
newline = self.replaceToken(newline,
"[INCLUDE_FILE]", self.file)
# Include file
newline = self.replaceToken(newline,
"[DEFAULT_LIST]", self.default_list)
# model description
newline = self.replaceToken(newline,
"[DESCRIPTION]", self.description)
# Parameter details
newline = self.replaceToken(newline,
"[PAR_DETAILS]", self.details)
# Call base constructor
if self.is_multifunc:
newline = self.replaceToken(newline,"[CALL_CPYTHON_INIT]",
'C' + self.pythonClass + \
".__init__(self,multfactor)\n\tself.is_multifunc = True")
newline = self.replaceToken(newline,"[MULTIPLICITY_INFO]",
self.multiplicity_info)
else:
newline = self.replaceToken(newline,"[CALL_CPYTHON_INIT]",
'C' + self.pythonClass + \
".__init__(self)\n self.is_multifunc = False")
newline = self.replaceToken(newline,
"[MULTIPLICITY_INFO]", "None")
# fixed list details
fixed_str = str(self.fixed)
fixed_str = fixed_str.replace(', ', ',\n ')
newline = self.replaceToken(newline, "[FIXED]", fixed_str)
# non-fittable list details
pars_str = str(self.non_fittable)
pars_str = pars_str.replace(', ',
',\n ')
newline = self.replaceToken(newline,
"[NON_FITTABLE_PARAMS]", pars_str)
## parameters with orientation
oriented_str = str(self.orientation_params)
formatted_endl = ',\n '
oriented_str = oriented_str.replace(', ', formatted_endl)
newline = self.replaceToken(newline,
"[ORIENTATION_PARAMS]", oriented_str)
## parameters with magnetism
newline = self.replaceToken(newline,
"[MAGNETIC_PARAMS]", str(self.magnetic_params))
if self.category:
newline = self.replaceToken(newline, "[CATEGORY]",
'"' + self.category + '"')
else:
newline = self.replaceToken(newline, "[CATEGORY]",
"None")
# Write new line to the wrapper .c file
file.write(newline+'\n')
file.close()
def replaceToken(self, line, key, value): #pylint: disable-msg=R0201
""" Replace a token in the template file
@param line: line of text to inspect
@param key: token to look for
@param value: string value to replace the token with
@return: new string value
"""
lenkey = len(key)
newline = line
while newline.count(key)>0:
index = newline.index(key)
newline = newline[:index]+value+newline[index+lenkey:]
return newline
def getModelName(self):
return self.pythonClass
# main
if __name__ == '__main__':
if len(sys.argv)>1:
print "Will look for file %s" % sys.argv[1]
app = WrapperGenerator(sys.argv[1])
else:
app = WrapperGenerator("test.h")
app.read()
app.write_c_wrapper()
app.write_python_wrapper()
print app
# End of file
minor fix for reading disper. param names
#!/usr/bin/env python
""" WrapperGenerator class to generate model code automatically.
"""
import os, sys,re
import lineparser
class WrapperGenerator:
""" Python wrapper generator for C models
The developer must provide a header file describing
the new model.
To provide the name of the Python class to be
generated, the .h file must contain the following
string in the comments:
// [PYTHONCLASS] = my_model
where my_model must be replaced by the name of the
class that you want to import from sans.models.
(example: [PYTHONCLASS] = MyModel
will create a class MyModel in sans.models.MyModel.
It will also create a class CMyModel in
sans_extension.c_models.)
Also in comments, each parameter of the params
dictionary must be declared with a default value
in the following way:
// [DEFAULT]=param_name=default_value
(example:
// [DEFAULT]=radius=20.0
)
See cylinder.h for an example.
A .c file corresponding to the .h file should also
be provided (example: my_model.h, my_model.c).
The .h file should define two function definitions. For example,
cylinder.h defines the following:
/// 1D scattering function
double cylinder_analytical_1D(CylinderParameters *pars, double q);
/// 2D scattering function
double cylinder_analytical_2D(CylinderParameters *pars, double q, double phi);
The .c file implements those functions.
@author: Mathieu Doucet / UTK
@contact: mathieu.doucet@nist.gov
"""
def __init__(self, filename, output_dir='.', c_wrapper_dir='.'):
""" Initialization """
## Name of .h file to generate wrapper from
self.file = filename
# Info read from file
## Name of python class to write
self.pythonClass = None
## Parser in struct section
self.inStruct = False
self.foundCPP = False
self.inParDefs = False
## Name of struct for the c object
self.structName = None
## Dictionary of parameters
self.params = {}
## ModelCalculation module flag
self.modelCalcFlag = False
## List of default parameters (text)
self.default_list = ""
## Dictionary of units
self.details = ""
## List of dispersed parameters
self.disp_params = []
#model description
self.description=''
# paramaters for fittable
self.fixed= []
# paramaters for non-fittable
self.non_fittable= []
## parameters with orientation
self.orientation_params =[]
## parameter with magnetism
self.magentic_params = []
# Model category
self.category = None
# Whether model belongs to multifunc
self.is_multifunc = False
## output directory for wrappers
self.output_dir = output_dir
self.c_wrapper_dir = c_wrapper_dir
def __repr__(self):
""" Simple output for printing """
rep = "\n Python class: %s\n\n" % self.pythonClass
rep += " struc name: %s\n\n" % self.structName
rep += " params: %s\n\n" % self.params
rep += " description: %s\n\n" % self.description
rep += " Fittable parameters: %s\n\n"% self.fixed
rep += " Non-Fittable parameters: %s\n\n"% self.non_fittable
rep += " Orientation parameters: %s\n\n"% self.orientation_params
rep += " Magnetic parameters: %s\n\n"% self.magnetic_params
return rep
def read(self):
""" Reads in the .h file to catch parameters of the wrapper """
# Check if the file is there
if not os.path.isfile(self.file):
raise ValueError, "File %s is not a regular file" % self.file
# Read file
f = open(self.file,'r')
buf = f.read()
self.default_list = "List of default parameters:\n"
#lines = string.split(buf,'\n')
lines = buf.split('\n')
self.details = "## Parameter details [units, min, max]\n"
self.details += " self.details = {}\n"
#open item in this case Fixed
text='text'
key2="<%s>"%text.lower()
# close an item in this case fixed
text='TexT'
key3="</%s>"%text.lower()
## Catch fixed parameters
key = "[FIXED]"
try:
self.fixed= lineparser.readhelper(lines, key,
key2, key3, file=self.file)
except:
raise
## Catch non-fittable parameters parameters
key = "[NON_FITTABLE_PARAMS]"
try:
self.non_fittable= lineparser.readhelper(lines, key, key2,
key3, file=self.file)
except:
raise
## Catch parameters with orientation
key = "[ORIENTATION_PARAMS]"
try:
self.orientation_params = lineparser.readhelper(lines, key,
key2, key3, file=self.file)
except:
raise
## Catch parameters with orientation
key = "[MAGNETIC_PARAMS]"
try:
self.magnetic_params = lineparser.readhelper( lines,key,
key2,key3, file= self.file)
except:
raise
## Catch Description
key = "[DESCRIPTION]"
find_description = False
temp=""
for line in lines:
if line.count(key)>0 :
try:
find_description= True
index = line.index(key)
toks = line[index:].split("=",1 )
temp=toks[1].lstrip().rstrip()
text='text'
key2="<%s>"%text.lower()
if re.match(key2,temp)!=None:
toks2=temp.split(key2,1)
self.description=toks2[1]
text='text'
key2="</%s>"%text.lower()
if re.search(key2,toks2[1])!=None:
temp=toks2[1].split(key2,1)
self.description=temp[0]
break
else:
self.description=temp
break
except:
raise ValueError, "Could not parse file %s" % self.file
elif find_description:
text='text'
key2="</%s>"%text.lower()
if re.search(key2,line)!=None:
tok=line.split(key2,1)
temp=tok[0].split("//",1)
self.description+=tok[1].lstrip().rstrip()
break
else:
if re.search("//",line)!=None:
temp=line.split("//",1)
self.description+='\n\t\t'+temp[1].lstrip().rstrip()
else:
self.description+='\n\t\t'+line.lstrip().rstrip()
for line in lines:
# Catch class name
key = "[PYTHONCLASS]"
if line.count(key)>0:
try:
index = line.index(key)
toks = line[index:].split("=" )
self.pythonClass = toks[1].lstrip().rstrip()
except:
raise ValueError, "Could not parse file %s" % self.file
key = "[CATEGORY]"
if line.count(key)>0:
try:
index = line.index(key)
toks = line[index:].split("=")
self.category = toks[1].lstrip().rstrip()
except:
raise ValueError, "Could not parse file %s" % self.file
# is_multifunc
key = "[MULTIPLICITY_INFO]"
if line.count(key) > 0:
self.is_multifunc = True
try:
index = line.index(key)
toks = line[index:].split("=")
self.multiplicity_info = toks[1].lstrip().rstrip()
except:
raise ValueError, "Could not parse file %s" % self.file
# Catch struct name
# C++ class definition
if line.count("class")>0:
# We are entering a class definition
self.inParDefs = True
self.foundCPP = True
# Old-Style C struct definition
if line.count("typedef struct")>0:
# We are entering a struct block
self.inParDefs = True
self.inStruct = True
if self.inParDefs and line.count("}")>0:
# We are exiting a struct block
self.inParDefs = False
if self.inStruct:
self.inStruct = False
# Catch the name of the struct
index = line.index("}")
toks = line[index+1:].split(";")
# Catch pointer definition
toks2 = toks[0].split(',')
self.structName = toks2[0].lstrip().rstrip()
# Catch struct content
key = "[DEFAULT]"
if self.inParDefs and line.count(key)>0:
# Found a new parameter
try:
index = line.index(key)
toks = line[index:].split("=")
toks2 = toks[2].split()
val = float(toks2[0])
self.params[toks[1]] = val
#self.pythonClass = toks[1].lstrip().rstrip()
units = ""
if len(toks2) >= 2:
units = toks2[1]
self.default_list += " %-15s = %s %s\n" % \
(toks[1], val, units)
# Check for min and max
min = "None"
max = "None"
if len(toks2) == 4:
min = toks2[2]
max = toks2[3]
self.details += " self.details['%s'] = ['%s', %s, %s]\n" % \
(toks[1].lstrip().rstrip(), units.lstrip().rstrip(), min, max)
except:
raise ValueError, "Could not parse input file %s \n %s" % \
(self.file, sys.exc_value)
# Catch need for numerical calculations
key = "CalcParameters calcPars"
if line.count(key)>0:
self.modelCalcFlag = True
# Catch list of dispersed parameters
key = "[DISP_PARAMS]"
if line.count(key)>0:
try:
index = line.index(key)
toks = line[index:].split("=")
list_str = toks[1].lstrip().rstrip()
self.disp_params = list_str.split(',')
except:
raise ValueError, "Could not parse file %s" % self.file
def write_c_wrapper(self):
""" Writes the C file to create the python extension class
The file is written in C[PYTHONCLASS].c
"""
file_path = os.path.join(self.c_wrapper_dir,
"C"+self.pythonClass+'.cpp')
file = open(file_path, 'w')
template = open(os.path.join(os.path.dirname(__file__),
"classTemplate.txt"), 'r')
tmp_buf = template.read()
#tmp_lines = string.split(tmp_buf,'\n')
tmp_lines = tmp_buf.split('\n')
for tmp_line in tmp_lines:
# Catch class name
newline = self.replaceToken(tmp_line,
"[PYTHONCLASS]", 'C'+self.pythonClass)
#Catch model description
#newline = self.replaceToken(tmp_line,
# "[DESCRIPTION]", self.description)
# Catch C model name
newline = self.replaceToken(newline,
"[CMODEL]", self.pythonClass)
# Catch class name
newline = self.replaceToken(newline,
"[MODELSTRUCT]", self.structName)
# Sort model initialization based on multifunc
if(self.is_multifunc):
line = "int level = 1;\nPyArg_ParseTuple(args,\"i\",&level);\n"
line += "self->model = new " + self.pythonClass + "(level);"
else:
line = "self->model = new " + self.pythonClass + "();"
newline = self.replaceToken(newline,"[INITIALIZE_MODEL]",
line)
# Dictionary initialization
param_str = "// Initialize parameter dictionary\n"
for par in self.params:
param_str += " PyDict_SetItemString(self->params,\"%s\",Py_BuildValue(\"d\",%10.12f));\n" % \
(par, self.params[par])
if len(self.disp_params)>0:
param_str += " // Initialize dispersion / averaging parameter dict\n"
param_str += " DispersionVisitor* visitor = new DispersionVisitor();\n"
param_str += " PyObject * disp_dict;\n"
for par in self.disp_params:
par = par.strip()
if par == '':
continue
param_str += " disp_dict = PyDict_New();\n"
param_str += " self->model->%s.dispersion->accept_as_source(visitor, self->model->%s.dispersion, disp_dict);\n" % (par, par)
param_str += " PyDict_SetItemString(self->dispersion, \"%s\", disp_dict);\n" % par
# Initialize dispersion object dictionnary
param_str += "\n"
newline = self.replaceToken(newline,
"[INITDICTIONARY]", param_str)
# Read dictionary
param_str = " // Reader parameter dictionary\n"
for par in self.params:
param_str += " self->model->%s = PyFloat_AsDouble( PyDict_GetItemString(self->params, \"%s\") );\n" % \
(par, par)
if len(self.disp_params)>0:
param_str += " // Read in dispersion parameters\n"
param_str += " PyObject* disp_dict;\n"
param_str += " DispersionVisitor* visitor = new DispersionVisitor();\n"
for par in self.disp_params:
par = par.strip()
if par == '':
continue
param_str += " disp_dict = PyDict_GetItemString(self->dispersion, \"%s\");\n" % par
param_str += " self->model->%s.dispersion->accept_as_destination(visitor, self->model->%s.dispersion, disp_dict);\n" % (par, par)
newline = self.replaceToken(newline, "[READDICTIONARY]", param_str)
# Name of .c file
#toks = string.split(self.file,'.')
basename = os.path.basename(self.file)
toks = basename.split('.')
newline = self.replaceToken(newline, "[C_FILENAME]", toks[0])
# Include file
basename = os.path.basename(self.file)
newline = self.replaceToken(newline,
"[INCLUDE_FILE]", self.file)
if self.foundCPP:
newline = self.replaceToken(newline,
"[C_INCLUDE_FILE]", "")
newline = self.replaceToken(newline,
"[CPP_INCLUDE_FILE]", "#include \"%s\"" % basename)
else:
newline = self.replaceToken(newline,
"[C_INCLUDE_FILE]", "#include \"%s\"" % basename)
newline = self.replaceToken(newline,
"[CPP_INCLUDE_FILE]", "#include \"models.hh\"")
# Numerical calcs dealloc
dealloc_str = "\n"
if self.modelCalcFlag:
dealloc_str = " modelcalculations_dealloc(&(self->model_pars.calcPars));\n"
newline = self.replaceToken(newline,
"[NUMERICAL_DEALLOC]", dealloc_str)
# Numerical calcs init
init_str = "\n"
if self.modelCalcFlag:
init_str = " modelcalculations_init(&(self->model_pars.calcPars));\n"
newline = self.replaceToken(newline,
"[NUMERICAL_INIT]", init_str)
# Numerical calcs reset
reset_str = "\n"
if self.modelCalcFlag:
reset_str = "modelcalculations_reset(&(self->model_pars.calcPars));\n"
newline = self.replaceToken(newline,
"[NUMERICAL_RESET]", reset_str)
# Setting dispsertion weights
set_weights = " // Ugliness necessary to go from python to C\n"
set_weights = " // TODO: refactor this\n"
for par in self.disp_params:
par = par.strip()
if par == '':
continue
set_weights += " if (!strcmp(par_name, \"%s\")) {\n" % par
set_weights += " self->model->%s.dispersion = dispersion;\n" % par
set_weights += " } else"
newline = self.replaceToken(newline,
"[SET_DISPERSION]", set_weights)
# Write new line to the wrapper .c file
file.write(newline+'\n')
file.close()
def write_python_wrapper(self):
""" Writes the python file to create the python extension class
The file is written in ../[PYTHONCLASS].py
"""
file_path = os.path.join(self.output_dir, self.pythonClass+'.py')
file = open(file_path, 'w')
template = open(os.path.join(os.path.dirname(__file__),
"modelTemplate.txt"), 'r')
tmp_buf = template.read()
tmp_lines = tmp_buf.split('\n')
for tmp_line in tmp_lines:
# Catch class name
newline = self.replaceToken(tmp_line,
"[CPYTHONCLASS]",
'C' + self.pythonClass)
# Catch class name
newline = self.replaceToken(newline,
"[PYTHONCLASS]", self.pythonClass)
# Include file
newline = self.replaceToken(newline,
"[INCLUDE_FILE]", self.file)
# Include file
newline = self.replaceToken(newline,
"[DEFAULT_LIST]", self.default_list)
# model description
newline = self.replaceToken(newline,
"[DESCRIPTION]", self.description)
# Parameter details
newline = self.replaceToken(newline,
"[PAR_DETAILS]", self.details)
# Call base constructor
if self.is_multifunc:
newline = self.replaceToken(newline,"[CALL_CPYTHON_INIT]",
'C' + self.pythonClass + \
".__init__(self,multfactor)\n\tself.is_multifunc = True")
newline = self.replaceToken(newline,"[MULTIPLICITY_INFO]",
self.multiplicity_info)
else:
newline = self.replaceToken(newline,"[CALL_CPYTHON_INIT]",
'C' + self.pythonClass + \
".__init__(self)\n self.is_multifunc = False")
newline = self.replaceToken(newline,
"[MULTIPLICITY_INFO]", "None")
# fixed list details
fixed_str = str(self.fixed)
fixed_str = fixed_str.replace(', ', ',\n ')
newline = self.replaceToken(newline, "[FIXED]", fixed_str)
# non-fittable list details
pars_str = str(self.non_fittable)
pars_str = pars_str.replace(', ',
',\n ')
newline = self.replaceToken(newline,
"[NON_FITTABLE_PARAMS]", pars_str)
## parameters with orientation
oriented_str = str(self.orientation_params)
formatted_endl = ',\n '
oriented_str = oriented_str.replace(', ', formatted_endl)
newline = self.replaceToken(newline,
"[ORIENTATION_PARAMS]", oriented_str)
## parameters with magnetism
newline = self.replaceToken(newline,
"[MAGNETIC_PARAMS]", str(self.magnetic_params))
if self.category:
newline = self.replaceToken(newline, "[CATEGORY]",
'"' + self.category + '"')
else:
newline = self.replaceToken(newline, "[CATEGORY]",
"None")
# Write new line to the wrapper .c file
file.write(newline+'\n')
file.close()
def replaceToken(self, line, key, value): #pylint: disable-msg=R0201
""" Replace a token in the template file
@param line: line of text to inspect
@param key: token to look for
@param value: string value to replace the token with
@return: new string value
"""
lenkey = len(key)
newline = line
while newline.count(key)>0:
index = newline.index(key)
newline = newline[:index]+value+newline[index+lenkey:]
return newline
def getModelName(self):
return self.pythonClass
# main
if __name__ == '__main__':
if len(sys.argv)>1:
print "Will look for file %s" % sys.argv[1]
app = WrapperGenerator(sys.argv[1])
else:
app = WrapperGenerator("test.h")
app.read()
app.write_c_wrapper()
app.write_python_wrapper()
print app
# End of file
|
__version__ = '1.1.2'
v1.1.3
__version__ = '1.1.3'
|
from django.core.management.base import NoArgsCommand
import sys
try:
from decimal import Decimal
except:
from django.utils._decimal import Decimal
class Command(NoArgsCommand):
help = "Check the system to see if the Satchmo components are installed correctly."
def handle_noargs(self, **options):
from django.conf import settings
errors = []
print "Checking your satchmo configuration."
try:
import satchmo
except ImportError:
errors.append("Satchmo is not installed correctly. Please verify satchmo is on your sys path.")
try:
import Crypto.Cipher
except ImportError:
errors.append("The Python Cryptography Toolkit is not installed.")
try:
import Image
except ImportError:
errors.append("The Python Imaging Library is not installed.")
try:
import reportlab
except ImportError:
errors.append("Reportlab is not installed.")
try:
import trml2pdf
except ImportError:
errors.append("Tiny RML2PDF is not installed.")
try:
import comment_utils
except ImportError:
errors.append("Django comment_utils is not installed.")
try:
import registration
except ImportError:
errors.append("Django registration is not installed.")
try:
import yaml
except ImportError:
errors.append("YAML is not installed.")
try:
from satchmo.l10n.utils import get_locale_conv
get_locale_conv()
except:
errors.append("Locale is not set correctly. On unix systems, try executing locale-gen.")
try:
cache_avail = settings.CACHE_BACKEND
except AttributeError:
errors.append("A CACHE_BACKEND must be configured.")
if 'satchmo.shop.SSLMiddleware.SSLRedirect' not in settings.MIDDLEWARE_CLASSES:
errors.append("You must have satchmo.shop.SSLMiddleware.SSLRedirect in your MIDDLEWARE_CLASSES.")
if 'satchmo.shop.context_processors.settings' not in settings.TEMPLATE_CONTEXT_PROCESSORS:
errors.append("You must have satchmo.shop.context_processors.settings in your TEMPLATE_CONTEXT_PROCESSORS.")
if 'satchmo.accounts.email-auth.EmailBackend' not in settings.AUTHENTICATION_BACKENDS:
errors.append("You must have satchmo.accounts.email-auth.EmailBackend in your AUTHENTICATION_BACKENDS")
python_ver = Decimal("%s.%s" % (sys.version_info[0], sys.version_info[1]))
if python_ver < Decimal("2.4"):
errors.append("Python version must be at least 2.4.")
if python_ver < Decimal("2.5"):
try:
from xml.etree.ElementTree import Element
except ImportError:
errors.append("Elementtree is not installed.")
if len(errors) == 0:
print "Your configuration has no errors."
else:
print "The following errors were found:"
for error in errors:
print error
fixing improper import, closes #347
--HG--
extra : convert_revision : svn%3Aa38d40e9-c014-0410-b785-c606c0c8e7de/satchmo/trunk%401162
from django.core.management.base import NoArgsCommand
import sys
try:
from decimal import Decimal
except:
from django.utils._decimal import Decimal
class Command(NoArgsCommand):
help = "Check the system to see if the Satchmo components are installed correctly."
def handle_noargs(self, **options):
from django.conf import settings
errors = []
print "Checking your satchmo configuration."
try:
import satchmo
except ImportError:
errors.append("Satchmo is not installed correctly. Please verify satchmo is on your sys path.")
try:
import Crypto.Cipher
except ImportError:
errors.append("The Python Cryptography Toolkit is not installed.")
try:
import Image
except ImportError:
errors.append("The Python Imaging Library is not installed.")
try:
import reportlab
except ImportError:
errors.append("Reportlab is not installed.")
try:
import trml2pdf
except ImportError:
errors.append("Tiny RML2PDF is not installed.")
try:
import comment_utils
except ImportError:
errors.append("Django comment_utils is not installed.")
try:
import registration
except ImportError:
errors.append("Django registration is not installed.")
try:
import yaml
except ImportError:
errors.append("YAML is not installed.")
try:
from satchmo.l10n.utils import get_locale_conv
get_locale_conv()
except:
errors.append("Locale is not set correctly. On unix systems, try executing locale-gen.")
try:
cache_avail = settings.CACHE_BACKEND
except AttributeError:
errors.append("A CACHE_BACKEND must be configured.")
if 'satchmo.shop.SSLMiddleware.SSLRedirect' not in settings.MIDDLEWARE_CLASSES:
errors.append("You must have satchmo.shop.SSLMiddleware.SSLRedirect in your MIDDLEWARE_CLASSES.")
if 'satchmo.shop.context_processors.settings' not in settings.TEMPLATE_CONTEXT_PROCESSORS:
errors.append("You must have satchmo.shop.context_processors.settings in your TEMPLATE_CONTEXT_PROCESSORS.")
if 'satchmo.accounts.email-auth.EmailBackend' not in settings.AUTHENTICATION_BACKENDS:
errors.append("You must have satchmo.accounts.email-auth.EmailBackend in your AUTHENTICATION_BACKENDS")
python_ver = Decimal("%s.%s" % (sys.version_info[0], sys.version_info[1]))
if python_ver < Decimal("2.4"):
errors.append("Python version must be at least 2.4.")
if python_ver < Decimal("2.5"):
try:
from elementtree.ElementTree import Element
except ImportError:
errors.append("Elementtree is not installed.")
if len(errors) == 0:
print "Your configuration has no errors."
else:
print "The following errors were found:"
for error in errors:
print error
|
__version__ = "3.0b4"
Version bump to 3.0
__version__ = "3.0"
|
#!/usr/bin/env python
#
# svnsync_tests.py: Tests SVNSync's repository mirroring capabilities.
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Copyright (c) 2005 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
######################################################################
# General modules
import string, sys, re, os.path
# Our testing module
import svntest
# (abbreviation)
Skip = svntest.testcase.Skip
XFail = svntest.testcase.XFail
Item = svntest.wc.StateItem
######################################################################
# Helper routines
def build_repos(sbox):
"""Avoid the use sbox.build() because we're working with a repos
other than the Greek tree."""
# Cleanup after the last run by removing any left-over repository.
svntest.main.safe_rmtree(sbox.repo_dir)
# Create an empty repository.
svntest.main.create_repos(sbox.repo_dir)
svntest.main.set_repos_paths(sbox.repo_dir)
def run_and_verify_load(repo_dir, dump_file_content):
"Runs 'svnadmin load' and reports any errors."
expected_stderr = []
output, errput = \
svntest.main.run_command_stdin(
"%s load --force-uuid --quiet %s" % (svntest.main.svnadmin_binary,
repo_dir),
expected_stderr, 1, dump_file_content)
if expected_stderr:
svntest.actions.compare_and_display_lines(
"Standard error output", "STDERR", expected_stderr, errput)
def run_test(sbox, dump_file_name):
"Load a dump file, sync repositories, and compare contents."
# Create the empty master repository.
build_repos(sbox)
# Load the specified dump file into the master repository.
master_dumpfile_contents = file(os.path.join("svnsync_tests_data",
dump_file_name)).readlines()
run_and_verify_load(sbox.repo_dir, master_dumpfile_contents)
# Create the empty destination repository.
dest_sbox = sbox.clone_dependent()
build_repos(dest_sbox)
# Setup the mirror repository. Feed it the UUID of the source repository.
output, errput = svntest.main.run_svnlook("uuid", sbox.repo_dir)
mirror_cfg = ["SVN-fs-dump-format-version: 2\n",
"UUID: " + output[0],
]
run_and_verify_load(dest_sbox.repo_dir, mirror_cfg)
# Create the revprop-change hook for this test
svntest.actions.enable_revprop_changes(svntest.main.current_repo_dir)
# Initialize the mirror repository from the master.
output, errput = svntest.main.run_svnsync(
"initialize", dest_sbox.repo_url, "--source-url", sbox.repo_url)
if output:
raise svntest.actions.SVNUnexpectedStdout(output)
if errput:
raise svntest.actions.SVNUnexpectedStderr(errput)
# Synchronize the mirror repository with the master.
output, errput = svntest.main.run_svnsync(
"synchronize", dest_sbox.repo_url)
if not output:
# should be: ['Committing rev 1\n', 'Committing rev 2\n']
raise svntest.actions.SVNUnexpectedStdout("Missing stdout")
if errput:
raise svntest.actions.SVNUnexpectedStderr(errput)
# Remove some SVNSync-specific housekeeping properties from the
# mirror repository in preparation for the comparison dump.
for prop_name in ("svn:sync-from-url", "svn:sync-from-uuid",
"svn:sync-last-merged-rev"):
svntest.actions.run_and_verify_svn(None, None, [],
"propdel", "--revprop", "-r", "0",
prop_name, dest_sbox.repo_url)
# Create a dump file from the mirror repository.
output, errput = svntest.main.run_svnadmin("dump", dest_sbox.repo_dir)
if not output:
raise svntest.actions.SVNUnexpectedStdout("Missing stdout")
if not errput:
raise svntest.actions.SVNUnexpectedStderr("Missing stderr")
dest_dump = output
# Compare the original dump file (used to create the master
# repository) with the dump produced by the mirror repository.
svntest.actions.compare_and_display_lines(
"Dump files", "DUMP", master_dumpfile_contents, dest_dump)
######################################################################
# Tests
#----------------------------------------------------------------------
def copy_and_modify(sbox):
"copy and modify"
run_test(sbox, "copy-and-modify.dump")
#----------------------------------------------------------------------
def copy_from_previous_version_and_modify(sbox):
"copy from previous version and modify"
run_test(sbox, "copy-from-previous-version-and-modify.dump")
#----------------------------------------------------------------------
def copy_from_previous_version(sbox):
"copy from previous version"
run_test(sbox, "copy-from-previous-version.dump")
#----------------------------------------------------------------------
def modified_in_place(sbox):
"modified in place"
run_test(sbox, "modified-in-place.dump")
#----------------------------------------------------------------------
def tag_empty_trunk(sbox):
"tag empty trunk"
run_test(sbox, "tag-empty-trunk.dump")
#----------------------------------------------------------------------
def tag_trunk_with_dir(sbox):
"tag trunk containing a sub-directory"
run_test(sbox, "tag-trunk-with-dir.dump")
#----------------------------------------------------------------------
def tag_trunk_with_file(sbox):
"tag trunk containing a file"
run_test(sbox, "tag-trunk-with-file.dump")
#----------------------------------------------------------------------
def tag_trunk_with_file2(sbox):
"tag trunk containing a file (#2)"
run_test(sbox, "tag-trunk-with-file2.dump")
#----------------------------------------------------------------------
def tag_with_modified_file(sbox):
"tag with a modified file"
run_test(sbox, "tag-with-modified-file.dump")
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
copy_and_modify,
copy_from_previous_version_and_modify,
copy_from_previous_version,
modified_in_place,
tag_empty_trunk,
tag_trunk_with_dir,
tag_trunk_with_file2,
tag_trunk_with_file,
tag_with_modified_file,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
Tweak svnsync tests so they work like the getopt tests when
looking for their data files.
* subversion/tests/cmdline/svnsync_tests.py
(run_test): Use the script path to locate data files, and
set the eol-style to native.
#!/usr/bin/env python
#
# svnsync_tests.py: Tests SVNSync's repository mirroring capabilities.
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Copyright (c) 2005 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
######################################################################
# General modules
import string, sys, re, os.path
# Our testing module
import svntest
# (abbreviation)
Skip = svntest.testcase.Skip
XFail = svntest.testcase.XFail
Item = svntest.wc.StateItem
######################################################################
# Helper routines
def build_repos(sbox):
"""Avoid the use sbox.build() because we're working with a repos
other than the Greek tree."""
# Cleanup after the last run by removing any left-over repository.
svntest.main.safe_rmtree(sbox.repo_dir)
# Create an empty repository.
svntest.main.create_repos(sbox.repo_dir)
svntest.main.set_repos_paths(sbox.repo_dir)
def run_and_verify_load(repo_dir, dump_file_content):
"Runs 'svnadmin load' and reports any errors."
expected_stderr = []
output, errput = \
svntest.main.run_command_stdin(
"%s load --force-uuid --quiet %s" % (svntest.main.svnadmin_binary,
repo_dir),
expected_stderr, 1, dump_file_content)
if expected_stderr:
svntest.actions.compare_and_display_lines(
"Standard error output", "STDERR", expected_stderr, errput)
def run_test(sbox, dump_file_name):
"Load a dump file, sync repositories, and compare contents."
# Create the empty master repository.
build_repos(sbox)
# This directory contains all the dump files
svnsync_tests_dir = os.path.join(os.path.dirname(sys.argv[0]),
'svnsync_tests_data')
# Load the specified dump file into the master repository.
master_dumpfile_contents = file(os.path.join(svnsync_tests_dir,
dump_file_name)).readlines()
run_and_verify_load(sbox.repo_dir, master_dumpfile_contents)
# Create the empty destination repository.
dest_sbox = sbox.clone_dependent()
build_repos(dest_sbox)
# Setup the mirror repository. Feed it the UUID of the source repository.
output, errput = svntest.main.run_svnlook("uuid", sbox.repo_dir)
mirror_cfg = ["SVN-fs-dump-format-version: 2\n",
"UUID: " + output[0],
]
run_and_verify_load(dest_sbox.repo_dir, mirror_cfg)
# Create the revprop-change hook for this test
svntest.actions.enable_revprop_changes(svntest.main.current_repo_dir)
# Initialize the mirror repository from the master.
output, errput = svntest.main.run_svnsync(
"initialize", dest_sbox.repo_url, "--source-url", sbox.repo_url)
if output:
raise svntest.actions.SVNUnexpectedStdout(output)
if errput:
raise svntest.actions.SVNUnexpectedStderr(errput)
# Synchronize the mirror repository with the master.
output, errput = svntest.main.run_svnsync(
"synchronize", dest_sbox.repo_url)
if not output:
# should be: ['Committing rev 1\n', 'Committing rev 2\n']
raise svntest.actions.SVNUnexpectedStdout("Missing stdout")
if errput:
raise svntest.actions.SVNUnexpectedStderr(errput)
# Remove some SVNSync-specific housekeeping properties from the
# mirror repository in preparation for the comparison dump.
for prop_name in ("svn:sync-from-url", "svn:sync-from-uuid",
"svn:sync-last-merged-rev"):
svntest.actions.run_and_verify_svn(None, None, [],
"propdel", "--revprop", "-r", "0",
prop_name, dest_sbox.repo_url)
# Create a dump file from the mirror repository.
output, errput = svntest.main.run_svnadmin("dump", dest_sbox.repo_dir)
if not output:
raise svntest.actions.SVNUnexpectedStdout("Missing stdout")
if not errput:
raise svntest.actions.SVNUnexpectedStderr("Missing stderr")
dest_dump = output
# Compare the original dump file (used to create the master
# repository) with the dump produced by the mirror repository.
svntest.actions.compare_and_display_lines(
"Dump files", "DUMP", master_dumpfile_contents, dest_dump)
######################################################################
# Tests
#----------------------------------------------------------------------
def copy_and_modify(sbox):
"copy and modify"
run_test(sbox, "copy-and-modify.dump")
#----------------------------------------------------------------------
def copy_from_previous_version_and_modify(sbox):
"copy from previous version and modify"
run_test(sbox, "copy-from-previous-version-and-modify.dump")
#----------------------------------------------------------------------
def copy_from_previous_version(sbox):
"copy from previous version"
run_test(sbox, "copy-from-previous-version.dump")
#----------------------------------------------------------------------
def modified_in_place(sbox):
"modified in place"
run_test(sbox, "modified-in-place.dump")
#----------------------------------------------------------------------
def tag_empty_trunk(sbox):
"tag empty trunk"
run_test(sbox, "tag-empty-trunk.dump")
#----------------------------------------------------------------------
def tag_trunk_with_dir(sbox):
"tag trunk containing a sub-directory"
run_test(sbox, "tag-trunk-with-dir.dump")
#----------------------------------------------------------------------
def tag_trunk_with_file(sbox):
"tag trunk containing a file"
run_test(sbox, "tag-trunk-with-file.dump")
#----------------------------------------------------------------------
def tag_trunk_with_file2(sbox):
"tag trunk containing a file (#2)"
run_test(sbox, "tag-trunk-with-file2.dump")
#----------------------------------------------------------------------
def tag_with_modified_file(sbox):
"tag with a modified file"
run_test(sbox, "tag-with-modified-file.dump")
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
copy_and_modify,
copy_from_previous_version_and_modify,
copy_from_previous_version,
modified_in_place,
tag_empty_trunk,
tag_trunk_with_dir,
tag_trunk_with_file2,
tag_trunk_with_file,
tag_with_modified_file,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
|
#!/usr/bin/env python
#
# upgrade_tests.py: test the working copy upgrade process
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
#
# These tests exercise the upgrade capabilities of 'svn upgrade' as it
# moves working copies between wc-1 and wc-ng.
#
import os
import re
import shutil
import sys
import tarfile
import tempfile
import logging
logger = logging.getLogger()
import svntest
from svntest import wc
Item = svntest.wc.StateItem
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
wc_is_too_old_regex = (".*is too old \(format \d+.*\).*")
def get_current_format():
# Get current format from subversion/libsvn_wc/wc.h
format_file = open(os.path.join(os.path.dirname(__file__), "..", "..", "libsvn_wc", "wc.h")).read()
return int(re.search("\n#define SVN_WC__VERSION (\d+)\n", format_file).group(1))
def replace_sbox_with_tarfile(sbox, tar_filename,
dir=None):
try:
svntest.main.safe_rmtree(sbox.wc_dir)
except OSError, e:
pass
if not dir:
dir = tar_filename.split('.')[0]
tarpath = os.path.join(os.path.dirname(sys.argv[0]), 'upgrade_tests_data',
tar_filename)
t = tarfile.open(tarpath, 'r:bz2')
extract_dir = tempfile.mkdtemp(dir=svntest.main.temp_dir)
for member in t.getmembers():
t.extract(member, extract_dir)
shutil.move(os.path.join(extract_dir, dir), sbox.wc_dir)
def replace_sbox_repo_with_tarfile(sbox, tar_filename, dir=None):
try:
svntest.main.safe_rmtree(sbox.repo_dir)
except OSError, e:
pass
if not dir:
dir = tar_filename.split('.')[0]
tarpath = os.path.join(os.path.dirname(sys.argv[0]), 'upgrade_tests_data',
tar_filename)
t = tarfile.open(tarpath, 'r:bz2')
extract_dir = tempfile.mkdtemp(dir=svntest.main.temp_dir)
for member in t.getmembers():
t.extract(member, extract_dir)
shutil.move(os.path.join(extract_dir, dir), sbox.repo_dir)
def check_format(sbox, expected_format):
dot_svn = svntest.main.get_admin_name()
for root, dirs, files in os.walk(sbox.wc_dir):
db = svntest.sqlite3.connect(os.path.join(root, dot_svn, 'wc.db'))
c = db.cursor()
c.execute('pragma user_version;')
found_format = c.fetchone()[0]
db.close()
if found_format != expected_format:
raise svntest.Failure("found format '%d'; expected '%d'; in wc '%s'" %
(found_format, expected_format, root))
dirs[:] = []
if dot_svn in dirs:
dirs.remove(dot_svn)
def check_pristine(sbox, files):
for file in files:
file_path = sbox.ospath(file)
file_text = open(file_path, 'r').read()
file_pristine = open(svntest.wc.text_base_path(file_path), 'r').read()
if (file_text != file_pristine):
raise svntest.Failure("pristine mismatch for '%s'" % (file))
def check_dav_cache(dir_path, wc_id, expected_dav_caches):
dot_svn = svntest.main.get_admin_name()
db = svntest.sqlite3.connect(os.path.join(dir_path, dot_svn, 'wc.db'))
c = db.cursor()
# Check if python's sqlite can read our db
c.execute('select sqlite_version()')
sqlite_ver = map(int, c.fetchone()[0].split('.'))
# SQLite versions have 3 or 4 number groups
major = sqlite_ver[0]
minor = sqlite_ver[1]
patch = sqlite_ver[2]
if major < 3 or (major == 3 and minor < 6) \
or (major == 3 and minor == 6 and patch < 18):
return # We need a newer SQLite
for local_relpath, expected_dav_cache in expected_dav_caches.items():
# NODES conversion is complete enough that we can use it if it exists
c.execute("""pragma table_info(nodes)""")
if c.fetchone():
c.execute('select dav_cache from nodes ' +
'where wc_id=? and local_relpath=? and op_depth = 0',
(wc_id, local_relpath))
row = c.fetchone()
else:
c.execute('select dav_cache from base_node ' +
'where wc_id=? and local_relpath=?',
(wc_id, local_relpath))
row = c.fetchone()
if row is None:
raise svntest.Failure("no dav cache for '%s'" % (local_relpath))
dav_cache = str(row[0])
if dav_cache != expected_dav_cache:
raise svntest.Failure(
"wrong dav cache for '%s'\n Found: '%s'\n Expected: '%s'" %
(local_relpath, dav_cache, expected_dav_cache))
db.close()
# Very simple working copy property diff handler for single line textual properties
# Should probably be moved to svntest/actions.py after some major refactoring.
def simple_property_verify(dir_path, expected_props):
# Shows all items in dict1 that are not also in dict2
def diff_props(dict1, dict2, name, match):
equal = True;
for key in dict1:
node = dict1[key]
node2 = dict2.get(key, None)
if node2:
for prop in node:
v1 = node[prop]
v2 = node2.get(prop, None)
if not v2:
logger.warn('\'%s\' property on \'%s\' not found in %s',
prop, key, name)
equal = False
if match and v1 != v2:
logger.warn('Expected \'%s\' on \'%s\' to be \'%s\', but found \'%s\'',
prop, key, v1, v2)
equal = False
else:
logger.warn('\'%s\': %s not found in %s', key, dict1[key], name)
equal = False
return equal
exit_code, output, errput = svntest.main.run_svn(None, 'proplist', '-R',
'-v', dir_path)
actual_props = {}
target = None
name = None
for i in output:
if i.startswith('Properties on '):
target = i[15+len(dir_path)+1:-3].replace(os.path.sep, '/')
elif not i.startswith(' '):
name = i.strip()
else:
v = actual_props.get(target, {})
v[name] = i.strip()
actual_props[target] = v
v1 = diff_props(expected_props, actual_props, 'actual', True)
v2 = diff_props(actual_props, expected_props, 'expected', False)
if not v1 or not v2:
logger.warn('Actual properties: %s', actual_props)
raise svntest.Failure("Properties unequal")
def simple_checksum_verify(expected_checksums):
for path, checksum in expected_checksums:
exit_code, output, errput = svntest.main.run_svn(None, 'info', path)
if exit_code:
raise svntest.Failure()
if checksum:
if not svntest.verify.RegexOutput('Checksum: ' + checksum,
match_all=False).matches(output):
raise svntest.Failure("did not get expected checksum " + checksum)
if not checksum:
if svntest.verify.RegexOutput('Checksum: ',
match_all=False).matches(output):
raise svntest.Failure("unexpected checksum")
def run_and_verify_status_no_server(wc_dir, expected_status):
"same as svntest.actions.run_and_verify_status(), but without '-u'"
exit_code, output, errput = svntest.main.run_svn(None, 'st', '-q', '-v',
wc_dir)
actual = svntest.tree.build_tree_from_status(output)
try:
svntest.tree.compare_trees("status", actual, expected_status.old_tree())
except svntest.tree.SVNTreeError:
svntest.verify.display_trees(None, 'STATUS OUTPUT TREE',
expected_status.old_tree(), actual)
logger.warn("ACTUAL STATUS TREE:")
svntest.tree.dump_tree_script(actual, wc_dir + os.sep)
raise
def basic_upgrade(sbox):
"basic upgrade behavior"
replace_sbox_with_tarfile(sbox, 'basic_upgrade.tar.bz2')
# Attempt to use the working copy, this should give an error
svntest.actions.run_and_verify_svn(None, wc_is_too_old_regex,
'info', sbox.wc_dir)
# Upgrade on something anywhere within a versioned subdir gives a
# 'not a working copy root' error. Upgrade on something without any
# versioned parent gives a 'not a working copy' error.
# Both cases use the same error code.
not_wc = ".*(E155007|E155019).*%s'.*not a working copy.*"
os.mkdir(sbox.ospath('X'))
svntest.actions.run_and_verify_svn(None, not_wc % 'X',
'upgrade', sbox.ospath('X'))
# Upgrade on a non-existent subdir within an old WC gives a
# 'not a working copy' error.
svntest.actions.run_and_verify_svn(None, not_wc % 'Y',
'upgrade', sbox.ospath('Y'))
# Upgrade on a versioned file within an old WC gives a
# 'not a working copy' error.
svntest.actions.run_and_verify_svn(None, not_wc % 'mu',
'upgrade', sbox.ospath('A/mu'))
# Upgrade on a versioned dir within an old WC gives a
# 'not a working copy' error.
svntest.actions.run_and_verify_svn(None, not_wc % 'A',
'upgrade', sbox.ospath('A'))
# Now upgrade the working copy
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# Actually check the format number of the upgraded working copy
check_format(sbox, get_current_format())
# Now check the contents of the working copy
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
check_pristine(sbox, ['iota', 'A/mu'])
def upgrade_with_externals(sbox):
"upgrade with externals"
# Create wc from tarfile, uses the same structure of the wc as the tests
# in externals_tests.py.
replace_sbox_with_tarfile(sbox, 'upgrade_with_externals.tar.bz2')
# Attempt to use the working copy, this should give an error
expected_stderr = wc_is_too_old_regex
svntest.actions.run_and_verify_svn(None, expected_stderr,
'info', sbox.wc_dir)
# Now upgrade the working copy
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# Actually check the format number of the upgraded working copy
check_format(sbox, get_current_format())
check_pristine(sbox, ['iota', 'A/mu',
'A/D/x/lambda', 'A/D/x/E/alpha'])
def upgrade_1_5_body(sbox, subcommand):
replace_sbox_with_tarfile(sbox, 'upgrade_1_5.tar.bz2')
# Attempt to use the working copy, this should give an error
expected_stderr = wc_is_too_old_regex
svntest.actions.run_and_verify_svn(None, expected_stderr,
subcommand, sbox.wc_dir)
# Now upgrade the working copy
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# Check the format of the working copy
check_format(sbox, get_current_format())
# Now check the contents of the working copy
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
check_pristine(sbox, ['iota', 'A/mu'])
def upgrade_1_5(sbox):
"test upgrading from a 1.5-era working copy"
return upgrade_1_5_body(sbox, 'info')
def update_1_5(sbox):
"test updating a 1.5-era working copy"
# The 'update' printed:
# Skipped 'svn-test-work\working_copies\upgrade_tests-3'
# Summary of conflicts:
# Skipped paths: 1
return upgrade_1_5_body(sbox, 'update')
def logs_left_1_5(sbox):
"test upgrading from a 1.5-era wc with stale logs"
replace_sbox_with_tarfile(sbox, 'logs_left_1_5.tar.bz2')
# Try to upgrade, this should give an error
expected_stderr = (".*Cannot upgrade with existing logs; .*")
svntest.actions.run_and_verify_svn(None, expected_stderr,
'upgrade', sbox.wc_dir)
def upgrade_wcprops(sbox):
"test upgrading a working copy with wcprops"
replace_sbox_with_tarfile(sbox, 'upgrade_wcprops.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# Make sure that .svn/all-wcprops has disappeared
dot_svn = svntest.main.get_admin_name()
if os.path.exists(os.path.join(sbox.wc_dir, dot_svn, 'all-wcprops')):
raise svntest.Failure("all-wcprops file still exists")
# Just for kicks, let's see if the wcprops are what we'd expect them
# to be. (This could be smarter.)
expected_dav_caches = {
'' :
'(svn:wc:ra_dav:version-url 41 /svn-test-work/local_tmp/repos/!svn/ver/1)',
'iota' :
'(svn:wc:ra_dav:version-url 46 /svn-test-work/local_tmp/repos/!svn/ver/1/iota)',
}
check_dav_cache(sbox.wc_dir, 1, expected_dav_caches)
# Poor mans relocate to fix up an 1.0 (xml style) working copy to refer to a
# valid repository, so svn upgrade can do its work on it
def xml_entries_relocate(path, from_url, to_url):
adm_name = svntest.main.get_admin_name()
entries = os.path.join(path, adm_name, 'entries')
txt = open(entries).read().replace('url="' + from_url, 'url="' + to_url)
os.chmod(entries, 0777)
open(entries, 'w').write(txt)
for dirent in os.listdir(path):
item_path = os.path.join(path, dirent)
if dirent == svntest.main.get_admin_name():
continue
if os.path.isdir(os.path.join(item_path, adm_name)):
xml_entries_relocate(item_path, from_url, to_url)
# Poor mans relocate to fix up an working copy to refer to a
# valid repository, so svn upgrade can do its work on it
def simple_entries_replace(path, from_url, to_url):
adm_name = svntest.main.get_admin_name()
entries = os.path.join(path, adm_name, 'entries')
txt = open(entries).read().replace(from_url, to_url)
os.chmod(entries, 0777)
open(entries, 'wb').write(txt)
for dirent in os.listdir(path):
item_path = os.path.join(path, dirent)
if dirent == svntest.main.get_admin_name():
continue
if os.path.isdir(os.path.join(item_path, adm_name)):
simple_entries_replace(item_path, from_url, to_url)
def basic_upgrade_1_0(sbox):
"test upgrading a working copy created with 1.0.0"
sbox.build(create_wc = False)
replace_sbox_with_tarfile(sbox, 'upgrade_1_0.tar.bz2')
url = sbox.repo_url
# This is non-canonical by the rules of svn_uri_canonicalize, it gets
# written into the entries file and upgrade has to canonicalize.
non_canonical_url = url[:-1] + '%%%02x' % ord(url[-1])
xml_entries_relocate(sbox.wc_dir, 'file:///1.0.0/repos', non_canonical_url)
# Attempt to use the working copy, this should give an error
expected_stderr = wc_is_too_old_regex
svntest.actions.run_and_verify_svn(None, expected_stderr,
'info', sbox.wc_dir)
# Now upgrade the working copy
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# And the separate working copy below COPIED or check_format() fails
svntest.actions.run_and_verify_svn(None, [],
'upgrade',
os.path.join(sbox.wc_dir, 'COPIED', 'G'))
# Actually check the format number of the upgraded working copy
check_format(sbox, get_current_format())
# Now check the contents of the working copy
# #### This working copy is not just a basic tree,
# fix with the right data once we get here
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev=7),
'B' : Item(status=' ', wc_rev='7'),
'B/mu' : Item(status=' ', wc_rev='7'),
'B/D' : Item(status=' ', wc_rev='7'),
'B/D/H' : Item(status=' ', wc_rev='7'),
'B/D/H/psi' : Item(status=' ', wc_rev='7'),
'B/D/H/omega' : Item(status=' ', wc_rev='7'),
'B/D/H/zeta' : Item(status='MM', wc_rev='7'),
'B/D/H/chi' : Item(status=' ', wc_rev='7'),
'B/D/gamma' : Item(status=' ', wc_rev='9'),
'B/D/G' : Item(status=' ', wc_rev='7'),
'B/D/G/tau' : Item(status=' ', wc_rev='7'),
'B/D/G/rho' : Item(status=' ', wc_rev='7'),
'B/D/G/pi' : Item(status=' ', wc_rev='7'),
'B/B' : Item(status=' ', wc_rev='7'),
'B/B/lambda' : Item(status=' ', wc_rev='7'),
'MKDIR' : Item(status='A ', wc_rev='0'),
'MKDIR/MKDIR' : Item(status='A ', wc_rev='0'),
'A' : Item(status=' ', wc_rev='7'),
'A/B' : Item(status=' ', wc_rev='7'),
'A/B/lambda' : Item(status=' ', wc_rev='7'),
'A/D' : Item(status=' ', wc_rev='7'),
'A/D/G' : Item(status=' ', wc_rev='7'),
'A/D/G/rho' : Item(status=' ', wc_rev='7'),
'A/D/G/pi' : Item(status=' ', wc_rev='7'),
'A/D/G/tau' : Item(status=' ', wc_rev='7'),
'A/D/H' : Item(status=' ', wc_rev='7'),
'A/D/H/psi' : Item(status=' ', wc_rev='7'),
'A/D/H/omega' : Item(status=' ', wc_rev='7'),
'A/D/H/zeta' : Item(status=' ', wc_rev='7'),
'A/D/H/chi' : Item(status=' ', wc_rev='7'),
'A/D/gamma' : Item(status=' ', wc_rev='7'),
'A/mu' : Item(status=' ', wc_rev='7'),
'iota' : Item(status=' ', wc_rev='7'),
'COPIED' : Item(status=' ', wc_rev='10'),
'DELETED' : Item(status='D ', wc_rev='10'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
expected_infos = [ {
'Node Kind': 'directory',
'Schedule': 'normal',
'Revision': '7',
'Last Changed Author' : 'Bert',
'Last Changed Rev' : '7'
} ]
svntest.actions.run_and_verify_info(expected_infos, sbox.wc_dir)
expected_infos = [ {
'Node Kind': 'directory',
'Schedule': 'delete',
'Revision': '10',
'Last Changed Author' : 'Bert',
'Last Changed Rev' : '10'
} ]
svntest.actions.run_and_verify_info(expected_infos,
os.path.join(sbox.wc_dir, 'DELETED'))
check_pristine(sbox, ['iota', 'A/mu', 'A/D/H/zeta'])
# Helper function for the x3 tests.
def do_x3_upgrade(sbox, expected_error=[]):
# Attempt to use the working copy, this should give an error
expected_stderr = wc_is_too_old_regex
svntest.actions.run_and_verify_svn(None, expected_stderr,
'info', sbox.wc_dir)
# Now upgrade the working copy
svntest.actions.run_and_verify_svn(None, expected_error,
'upgrade', sbox.wc_dir)
if expected_error != []:
return
# Actually check the format number of the upgraded working copy
check_format(sbox, get_current_format())
# Now check the contents of the working copy
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='2'),
'A' : Item(status=' ', wc_rev='2'),
'A/D' : Item(status=' ', wc_rev='2'),
'A/D/H' : Item(status=' ', wc_rev='2'),
'A/D/H/omega' : Item(status=' ', wc_rev='2'),
'A/D/H/psi' : Item(status='D ', wc_rev='2'),
'A/D/H/new' : Item(status='A ', copied='+', wc_rev='-'),
'A/D/H/chi' : Item(status='R ', copied='+', wc_rev='-'),
'A/D/gamma' : Item(status='D ', wc_rev='2'),
'A/D/G' : Item(status=' ', wc_rev='2'),
'A/B_new' : Item(status='A ', copied='+', wc_rev='-'),
'A/B_new/B' : Item(status='A ', copied='+', wc_rev='-'),
'A/B_new/B/E' : Item(status=' M', copied='+', wc_rev='-'),
'A/B_new/B/E/alpha' : Item(status=' ', copied='+', wc_rev='-'),
'A/B_new/B/E/beta' : Item(status='R ', copied='+', wc_rev='-'),
'A/B_new/B/new' : Item(status='A ', copied='+', wc_rev='-'),
'A/B_new/B/lambda' : Item(status='R ', copied='+', wc_rev='-'),
'A/B_new/B/F' : Item(status=' ', copied='+', wc_rev='-'),
'A/B_new/E' : Item(status=' M', copied='+', wc_rev='-'),
'A/B_new/E/alpha' : Item(status=' M', copied='+', wc_rev='-'),
'A/B_new/E/beta' : Item(status='RM', copied='+', wc_rev='-'),
'A/B_new/lambda' : Item(status='R ', copied='+', wc_rev='-'),
'A/B_new/new' : Item(status='A ', copied='+', wc_rev='-'),
'A/B_new/F' : Item(status=' ', copied='+', wc_rev='-'),
'A/B' : Item(status=' ', wc_rev='2'),
'A/B/E' : Item(status=' ', wc_rev='2'),
'A/B/E/beta' : Item(status='RM', copied='+', wc_rev='-'),
'A/B/E/alpha' : Item(status=' M', wc_rev='2'),
'A/B/F' : Item(status=' ', wc_rev='2'),
'A/B/lambda' : Item(status='R ', copied='+', wc_rev='-'),
'A/B/new' : Item(status='A ', copied='+', wc_rev='-'),
'A/G_new' : Item(status='A ', copied='+', wc_rev='-'),
'A/G_new/rho' : Item(status='R ', copied='+', wc_rev='-'),
'iota' : Item(status=' ', wc_rev='2'),
'A_new' : Item(status='A ', wc_rev='0'),
'A_new/alpha' : Item(status='A ', copied='+', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
simple_property_verify(sbox.wc_dir, {
'A/B_new/E/beta' : {'x3' : '3x',
'svn:eol-style': 'native'},
'A/B/E/beta' : {'s' : 't',
'svn:eol-style': 'native'},
'A/B_new/B/E/alpha' : {'svn:eol-style': 'native'},
'A/B/E/alpha' : {'q': 'r',
'svn:eol-style': 'native'},
'A_new/alpha' : {'svn:eol-style': 'native'},
'A/B_new/B/new' : {'svn:eol-style': 'native'},
'A/B_new/E/alpha' : {'svn:eol-style': 'native',
'u': 'v'},
'A/B_new/B/E' : {'q': 'r'},
'A/B_new/lambda' : {'svn:eol-style': 'native'},
'A/B_new/E' : {'x3': '3x'},
'A/B_new/new' : {'svn:eol-style': 'native'},
'A/B/lambda' : {'svn:eol-style': 'native'},
'A/B_new/B/E/beta' : {'svn:eol-style': 'native'},
'A/B_new/B/lambda' : {'svn:eol-style': 'native'},
'A/B/new' : {'svn:eol-style': 'native'},
'A/G_new/rho' : {'svn:eol-style': 'native'}
})
svntest.actions.run_and_verify_svn('Reverted.*', [],
'revert', '-R', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='2'),
'A' : Item(status=' ', wc_rev='2'),
'A/D' : Item(status=' ', wc_rev='2'),
'A/D/H' : Item(status=' ', wc_rev='2'),
'A/D/H/omega' : Item(status=' ', wc_rev='2'),
'A/D/H/psi' : Item(status=' ', wc_rev='2'),
'A/D/H/chi' : Item(status=' ', wc_rev='2'),
'A/D/gamma' : Item(status=' ', wc_rev='2'),
'A/D/G' : Item(status=' ', wc_rev='2'),
'A/B' : Item(status=' ', wc_rev='2'),
'A/B/F' : Item(status=' ', wc_rev='2'),
'A/B/E' : Item(status=' ', wc_rev='2'),
'A/B/E/beta' : Item(status=' ', wc_rev='2'),
'A/B/E/alpha' : Item(status=' ', wc_rev='2'),
'A/B/lambda' : Item(status=' ', wc_rev='2'),
'iota' : Item(status=' ', wc_rev='2'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
simple_property_verify(sbox.wc_dir, {
'A/B/E/beta' : {'svn:eol-style': 'native'},
# 'A/B/lambda' : {'svn:eol-style': 'native'},
'A/B/E/alpha' : {'svn:eol-style': 'native'}
})
@Issue(2530)
def x3_1_4_0(sbox):
"3x same wc upgrade 1.4.0 test"
replace_sbox_with_tarfile(sbox, 'wc-3x-1.4.0.tar.bz2', dir='wc-1.4.0')
do_x3_upgrade(sbox, expected_error='.*E155016: The properties of.*are in an '
'indeterminate state and cannot be upgraded. See issue #2530.')
@Issue(3811)
def x3_1_4_6(sbox):
"3x same wc upgrade 1.4.6 test"
replace_sbox_with_tarfile(sbox, 'wc-3x-1.4.6.tar.bz2', dir='wc-1.4.6')
do_x3_upgrade(sbox)
@Issue(3811)
def x3_1_6_12(sbox):
"3x same wc upgrade 1.6.12 test"
replace_sbox_with_tarfile(sbox, 'wc-3x-1.6.12.tar.bz2', dir='wc-1.6.12')
do_x3_upgrade(sbox)
def missing_dirs(sbox):
"missing directories and obstructing files"
# tarball wc looks like:
# svn co URL wc
# svn cp wc/A/B wc/A/B_new
# rm -rf wc/A/B/E wc/A/D wc/A/B_new/E wc/A/B_new/F
# touch wc/A/D wc/A/B_new/F
replace_sbox_with_tarfile(sbox, 'missing-dirs.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='1'),
'A' : Item(status=' ', wc_rev='1'),
'A/mu' : Item(status=' ', wc_rev='1'),
'A/C' : Item(status=' ', wc_rev='1'),
'A/D' : Item(status='! ', wc_rev='1'),
'A/B' : Item(status=' ', wc_rev='1'),
'A/B/F' : Item(status=' ', wc_rev='1'),
'A/B/E' : Item(status='! ', wc_rev='1'),
'A/B/lambda' : Item(status=' ', wc_rev='1'),
'iota' : Item(status=' ', wc_rev='1'),
'A/B_new' : Item(status='A ', wc_rev='-', copied='+'),
'A/B_new/E' : Item(status='! ', wc_rev='-'),
'A/B_new/F' : Item(status='! ', wc_rev='-'),
'A/B_new/lambda' : Item(status=' ', wc_rev='-', copied='+'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
def missing_dirs2(sbox):
"missing directories and obstructing dirs"
replace_sbox_with_tarfile(sbox, 'missing-dirs.tar.bz2')
os.remove(sbox.ospath('A/D'))
os.remove(sbox.ospath('A/B_new/F'))
os.mkdir(sbox.ospath('A/D'))
os.mkdir(sbox.ospath('A/B_new/F'))
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='1'),
'A' : Item(status=' ', wc_rev='1'),
'A/mu' : Item(status=' ', wc_rev='1'),
'A/C' : Item(status=' ', wc_rev='1'),
'A/D' : Item(status='! ', wc_rev='1'),
'A/B' : Item(status=' ', wc_rev='1'),
'A/B/F' : Item(status=' ', wc_rev='1'),
'A/B/E' : Item(status='! ', wc_rev='1'),
'A/B/lambda' : Item(status=' ', wc_rev='1'),
'iota' : Item(status=' ', wc_rev='1'),
'A/B_new' : Item(status='A ', wc_rev='-', copied='+'),
'A/B_new/E' : Item(status='! ', wc_rev='-'),
'A/B_new/F' : Item(status='! ', wc_rev='-'),
'A/B_new/lambda' : Item(status=' ', wc_rev='-', copied='+'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3808)
def delete_and_keep_local(sbox):
"check status delete and delete --keep-local"
replace_sbox_with_tarfile(sbox, 'wc-delete.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='0'),
'Normal' : Item(status=' ', wc_rev='1'),
'Deleted-Keep-Local': Item(status='D ', wc_rev='1'),
'Deleted' : Item(status='D ', wc_rev='1'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
# Deleted-Keep-Local should still exist after the upgrade
if not os.path.exists(os.path.join(sbox.wc_dir, 'Deleted-Keep-Local')):
raise svntest.Failure('wc/Deleted-Keep-Local should exist')
# Deleted should be removed after the upgrade as it was
# schedule delete and doesn't contain unversioned changes.
if os.path.exists(os.path.join(sbox.wc_dir, 'Deleted')):
raise svntest.Failure('wc/Deleted should not exist')
def dirs_only_upgrade(sbox):
"upgrade a wc without files"
replace_sbox_with_tarfile(sbox, 'dirs-only.tar.bz2')
expected_output = ["Upgraded '%s'\n" % (sbox.ospath('').rstrip(os.path.sep)),
"Upgraded '%s'\n" % (sbox.ospath('A'))]
svntest.actions.run_and_verify_svn(expected_output, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir, {
'' : Item(status=' ', wc_rev='1'),
'A' : Item(status=' ', wc_rev='1'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
def read_tree_conflict_data(sbox, path):
dot_svn = svntest.main.get_admin_name()
db = svntest.sqlite3.connect(os.path.join(sbox.wc_dir, dot_svn, 'wc.db'))
for row in db.execute("select tree_conflict_data from actual_node "
"where tree_conflict_data is not null "
"and local_relpath = '%s'" % path):
return
raise svntest.Failure("conflict expected for '%s'" % path)
def no_actual_node(sbox, path):
dot_svn = svntest.main.get_admin_name()
db = svntest.sqlite3.connect(os.path.join(sbox.wc_dir, dot_svn, 'wc.db'))
for row in db.execute("select 1 from actual_node "
"where local_relpath = '%s'" % path):
raise svntest.Failure("no actual node expected for '%s'" % path)
def upgrade_tree_conflict_data(sbox):
"upgrade tree conflict data (f20->f21)"
wc_dir = sbox.wc_dir
replace_sbox_with_tarfile(sbox, 'upgrade_tc.tar.bz2')
# Check and see if we can still read our tree conflicts
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.tweak('A/D/G/pi', status='D ', treeconflict='C')
expected_status.tweak('A/D/G/tau', status='! ', treeconflict='C',
wc_rev=None)
expected_status.tweak('A/D/G/rho', status='A ', copied='+',
treeconflict='C', wc_rev='-')
# Look inside pre-upgrade database
read_tree_conflict_data(sbox, 'A/D/G')
no_actual_node(sbox, 'A/D/G/pi')
no_actual_node(sbox, 'A/D/G/rho')
no_actual_node(sbox, 'A/D/G/tau')
# While the upgrade from f20 to f21 will work the upgrade from f22
# to f23 will not, since working nodes are present.
exit_code, output, errput = svntest.main.run_svn('format 22', 'upgrade',
wc_dir)
if not exit_code:
run_and_verify_status_no_server(wc_dir, expected_status)
else:
if not svntest.verify.RegexOutput('.*format 22 with WORKING nodes.*',
match_all=False).matches(errput):
raise svntest.Failure()
# Look insde post-upgrade database
read_tree_conflict_data(sbox, 'A/D/G/pi')
read_tree_conflict_data(sbox, 'A/D/G/rho')
read_tree_conflict_data(sbox, 'A/D/G/tau')
# no_actual_node(sbox, 'A/D/G') ### not removed but should be?
@Issue(3898)
def delete_in_copy_upgrade(sbox):
"upgrade a delete within a copy"
wc_dir = sbox.wc_dir
replace_sbox_with_tarfile(sbox, 'delete-in-copy.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
expected_status.add({
'A/B-copied' : Item(status='A ', copied='+', wc_rev='-'),
'A/B-copied/lambda' : Item(status=' ', copied='+', wc_rev='-'),
'A/B-copied/E' : Item(status='D ', copied='+', wc_rev='-'),
'A/B-copied/E/alpha' : Item(status='D ', copied='+', wc_rev='-'),
'A/B-copied/E/beta' : Item(status='D ', copied='+', wc_rev='-'),
'A/B-copied/F' : Item(status=' ', copied='+', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
svntest.actions.run_and_verify_svn('Reverted.*', [], 'revert', '-R',
sbox.ospath('A/B-copied/E'))
expected_status.tweak('A/B-copied/E',
'A/B-copied/E/alpha',
'A/B-copied/E/beta',
status=' ')
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
simple_checksum_verify([[sbox.ospath('A/B-copied/E/alpha'),
'b347d1da69df9a6a70433ceeaa0d46c8483e8c03']])
def replaced_files(sbox):
"upgrade with base and working replaced files"
wc_dir = sbox.wc_dir
replace_sbox_with_tarfile(sbox, 'replaced-files.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# A is a checked-out dir containing A/f and A/g, then
# svn cp wc/A wc/B
# svn rm wc/A/f wc/B/f
# svn cp wc/A/g wc/A/f # A/f replaced by copied A/g
# svn cp wc/A/g wc/B/f # B/f replaced by copied A/g (working-only)
# svn rm wc/A/g wc/B/g
# touch wc/A/g wc/B/g
# svn add wc/A/g wc/B/g # A/g replaced, B/g replaced (working-only)
# svn ps pX vX wc/A/g
# svn ps pY vY wc/B/g
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='5'),
'A' : Item(status=' ', wc_rev='5'),
'A/f' : Item(status='R ', wc_rev='-', copied='+'),
'A/g' : Item(status='RM', wc_rev='5'),
'B' : Item(status='A ', wc_rev='-', copied='+'),
'B/f' : Item(status='R ', wc_rev='-', copied='+'),
'B/g' : Item(status='RM', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
simple_property_verify(sbox.wc_dir, {
'A/f' : {'pAg' : 'vAg' },
'A/g' : {'pX' : 'vX' },
'B/f' : {'pAg' : 'vAg' },
'B/g' : {'pY' : 'vY' },
})
simple_checksum_verify([
[sbox.ospath('A/f'), '395dfb603d8a4e0348d0b082803f2b7426c76eb9'],
[sbox.ospath('A/g'), None],
[sbox.ospath('B/f'), '395dfb603d8a4e0348d0b082803f2b7426c76eb9'],
[sbox.ospath('B/g'), None]])
svntest.actions.run_and_verify_svn('Reverted.*', [], 'revert',
sbox.ospath('A/f'), sbox.ospath('B/f'),
sbox.ospath('A/g'), sbox.ospath('B/g'))
simple_property_verify(sbox.wc_dir, {
'A/f' : {'pAf' : 'vAf' },
'A/g' : {'pAg' : 'vAg' },
'B/f' : {'pAf' : 'vAf' },
'B/g' : {'pAg' : 'vAg' },
})
simple_checksum_verify([
[sbox.ospath('A/f'), '958eb2d755df2d9e0de6f7b835aec16b64d83f6f'],
[sbox.ospath('A/g'), '395dfb603d8a4e0348d0b082803f2b7426c76eb9'],
[sbox.ospath('B/f'), '958eb2d755df2d9e0de6f7b835aec16b64d83f6f'],
[sbox.ospath('B/g'), '395dfb603d8a4e0348d0b082803f2b7426c76eb9']])
def upgrade_with_scheduled_change(sbox):
"upgrade 1.6.x wc with a scheduled change"
replace_sbox_with_tarfile(sbox, 'upgrade_with_scheduled_change.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
expected_status.add({
'A/scheduled_file_1' : Item(status='A ', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3777)
def tree_replace1(sbox):
"upgrade 1.6 with tree replaced"
replace_sbox_with_tarfile(sbox, 'tree-replace1.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' M', wc_rev=17),
'B' : Item(status='R ', copied='+', wc_rev='-'),
'B/f' : Item(status=' ', copied='+', wc_rev='-'),
'B/g' : Item(status='D ', wc_rev=17),
'B/h' : Item(status=' ', copied='+', wc_rev='-'),
'B/C' : Item(status=' ', copied='+', wc_rev='-'),
'B/C/f' : Item(status=' ', copied='+', wc_rev='-'),
'B/D' : Item(status='D ', wc_rev=17),
'B/D/f' : Item(status='D ', wc_rev=17),
'B/E' : Item(status=' ', copied='+', wc_rev='-'),
'B/E/f' : Item(status=' ', copied='+', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3777)
def tree_replace2(sbox):
"upgrade 1.6 with tree replaced (2)"
replace_sbox_with_tarfile(sbox, 'tree-replace2.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' M', wc_rev=12),
'B' : Item(status='R ', copied='+', wc_rev='-'),
'B/f' : Item(status='D ', wc_rev=12),
'B/D' : Item(status='D ', wc_rev=12),
'B/g' : Item(status=' ', copied='+', wc_rev='-'),
'B/E' : Item(status=' ', copied='+', wc_rev='-'),
'C' : Item(status='R ', copied='+', wc_rev='-'),
'C/f' : Item(status=' ', copied='+', wc_rev='-'),
'C/D' : Item(status=' ', copied='+', wc_rev='-'),
'C/g' : Item(status='D ', wc_rev=12),
'C/E' : Item(status='D ', wc_rev=12),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
def upgrade_from_format_28(sbox):
"""upgrade from format 28: rename pristines"""
# Start with a format-28 WC that is a clean checkout of the Greek tree.
replace_sbox_with_tarfile(sbox, 'format_28.tar.bz2')
# Get the old and new pristine file paths for file 'iota'.
checksum = '2c0aa9014a0cd07f01795a333d82485ef6d083e2'
old_pristine_path = os.path.join(sbox.wc_dir, svntest.main.get_admin_name(),
'pristine', checksum[0:2], checksum)
new_pristine_path = old_pristine_path + '.svn-base'
assert os.path.exists(old_pristine_path)
assert not os.path.exists(new_pristine_path)
# Upgrade the WC
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
assert not os.path.exists(old_pristine_path)
assert os.path.exists(new_pristine_path)
@Issue(3901)
def depth_exclude(sbox):
"upgrade 1.6.x wc that has depth=exclude"
replace_sbox_with_tarfile(sbox, 'depth_exclude.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='1'),
'A' : Item(status=' ', wc_rev='1'),
'X' : Item(status='A ', copied='+', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3901)
def depth_exclude_2(sbox):
"1.6.x wc that has depth=exclude inside a delete"
replace_sbox_with_tarfile(sbox, 'depth_exclude_2.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='1'),
'A' : Item(status='D ', wc_rev='1'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3916)
def add_add_del_del_tc(sbox):
"wc with add-add and del-del tree conflicts"
replace_sbox_with_tarfile(sbox, 'add_add_del_del_tc.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='4'),
'A' : Item(status=' ', wc_rev='4'),
'A/B' : Item(status='A ', treeconflict='C', copied='+', wc_rev='-'),
'X' : Item(status=' ', wc_rev='3'),
'X/Y' : Item(status='! ', treeconflict='C')
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3916)
def add_add_x2(sbox):
"wc with 2 tree conflicts in same entry"
replace_sbox_with_tarfile(sbox, 'add_add_x2.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='3'),
'A' : Item(status=' ', wc_rev='3'),
'A/X' : Item(status='A ', treeconflict='C', copied='+', wc_rev='-'),
'A/Y' : Item(status='A ', treeconflict='C', copied='+', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3940)
def upgrade_with_missing_subdir(sbox):
"test upgrading a working copy with missing subdir"
sbox.build(create_wc = False)
replace_sbox_with_tarfile(sbox, 'basic_upgrade.tar.bz2')
simple_entries_replace(sbox.wc_dir,
'file:///Users/Hyrum/dev/test/greek-1.6.repo',
sbox.repo_url)
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'cafefeed-babe-face-dead-beeff00dfade')
url = sbox.repo_url
wc_dir = sbox.wc_dir
# Attempt to use the working copy, this should give an error
expected_stderr = wc_is_too_old_regex
svntest.actions.run_and_verify_svn(None, expected_stderr,
'info', sbox.wc_dir)
# Now remove a subdirectory
svntest.main.safe_rmtree(sbox.ospath('A/B'))
# Now upgrade the working copy and expect a missing subdir
expected_output = svntest.verify.UnorderedOutput([
"Upgraded '%s'\n" % sbox.wc_dir,
"Upgraded '%s'\n" % sbox.ospath('A'),
"Skipped '%s'\n" % sbox.ospath('A/B'),
"Upgraded '%s'\n" % sbox.ospath('A/C'),
"Upgraded '%s'\n" % sbox.ospath('A/D'),
"Upgraded '%s'\n" % sbox.ospath('A/D/G'),
"Upgraded '%s'\n" % sbox.ospath('A/D/H'),
])
svntest.actions.run_and_verify_svn(expected_output, [],
'upgrade', sbox.wc_dir)
# And now perform an update. (This used to fail with an assertion)
expected_output = svntest.wc.State(wc_dir, {
'A/B' : Item(verb='Restored'),
'A/B/E' : Item(status='A '),
'A/B/E/alpha' : Item(status='A '),
'A/B/E/beta' : Item(status='A '),
'A/B/lambda' : Item(status='A '),
'A/B/F' : Item(status='A '),
})
expected_disk = svntest.main.greek_state.copy()
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
# Do the update and check the results in three ways.
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status)
@Issue(3994)
def upgrade_locked(sbox):
"upgrade working copy with locked files"
replace_sbox_with_tarfile(sbox, 'upgrade_locked.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev=1),
'A' : Item(status='D ', wc_rev=2),
'A/third' : Item(status='D ', writelocked='K', wc_rev=2),
'other' : Item(status='D ', writelocked='K', wc_rev=4),
'iota' : Item(status=' ', writelocked='K', wc_rev=3),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(4015)
def upgrade_file_externals(sbox):
"upgrade with file externals"
sbox.build()
replace_sbox_with_tarfile(sbox, 'upgrade_file_externals.tar.bz2')
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'07146bbd-0b64-4aaf-ab70-cd76a0df2d41')
expected_output = svntest.verify.RegexOutput('r2 committed.*')
svntest.actions.run_and_verify_svnmucc(expected_output, [],
'-m', 'r2',
'propset', 'svn:externals',
'^/A/B/E EX\n^/A/mu muX',
sbox.repo_url + '/A/B/F')
expected_output = svntest.verify.RegexOutput('r3 committed.*')
svntest.actions.run_and_verify_svnmucc(expected_output, [],
'-m', 'r3',
'propset', 'svn:externals',
'^/A/B/F FX\n^/A/B/lambda lambdaX',
sbox.repo_url + '/A/C')
expected_output = svntest.verify.RegexOutput('r4 committed.*')
svntest.actions.run_and_verify_svnmucc(expected_output, [],
'-m', 'r4',
'propset', 'pname1', 'pvalue1',
sbox.repo_url + '/A/mu',
'propset', 'pname2', 'pvalue2',
sbox.repo_url + '/A/B/lambda',
'propset', 'pname3', 'pvalue3',
sbox.repo_url + '/A/B/E/alpha')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
svntest.actions.run_and_verify_svn(None, [], 'relocate',
'file:///tmp/repo', sbox.repo_url,
sbox.wc_dir)
expected_output = svntest.wc.State(sbox.wc_dir, {
'A/mu' : Item(status=' U'),
'A/B/lambda' : Item(status=' U'),
'A/B/E/alpha' : Item(status=' U'),
'A/C/FX/EX/alpha' : Item(status=' U'),
'A/C/FX/muX' : Item(status=' U'),
'A/C/lambdaX' : Item(status=' U'),
'A/B/F/EX/alpha' : Item(status=' U'),
'A/B/F/muX' : Item(status=' U'),
})
svntest.actions.run_and_verify_update(sbox.wc_dir, expected_output,
None, None)
### simple_property_verify only sees last line of multi-line
### property values such as svn:externals
simple_property_verify(sbox.wc_dir, {
'A/mu' : {'pname1' : 'pvalue1' },
'A/B/lambda' : {'pname2' : 'pvalue2' },
'A/B/E/alpha' : {'pname3' : 'pvalue3' },
'A/B/F' : {'svn:externals' : '^/A/mu muX'},
'A/C' : {'svn:externals' : '^/A/B/lambda lambdaX'},
'A/B/F/muX' : {'pname1' : 'pvalue1' },
'A/C/lambdaX' : {'pname2' : 'pvalue2' },
})
simple_property_verify(sbox.ospath('A/C/FX'), {
'' : {'svn:externals' : '^/A/mu muX'},
'muX' : {'pname1' : 'pvalue1' },
})
simple_property_verify(sbox.ospath('A/C/FX/EX'), {
'alpha' : {'pname3' : 'pvalue3' },
})
@Issue(4035)
def upgrade_missing_replaced(sbox):
"upgrade with missing replaced dir"
sbox.build(create_wc=False)
replace_sbox_with_tarfile(sbox, 'upgrade_missing_replaced.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'd7130b12-92f6-45c9-9217-b9f0472c3fab')
svntest.actions.run_and_verify_svn(None, [], 'relocate',
'file:///tmp/repo', sbox.repo_url,
sbox.wc_dir)
expected_output = svntest.wc.State(sbox.wc_dir, {
'A/B/E' : Item(status=' ', treeconflict='C',
prev_verb='Restored'),
'A/B/E/alpha' : Item(status=' ', treeconflict='A'),
'A/B/E/beta' : Item(status=' ', treeconflict='A'),
})
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
expected_status.tweak('A/B/E', status='! ', treeconflict='C', wc_rev='-',
entry_status='R ', entry_rev='1')
expected_status.tweak('A/B/E/alpha', 'A/B/E/beta', status='D ')
# This upgrade installs an INCOMPLETE node in WORKING for E, which makes the
# database technically invalid... but we did that for 1.7 and nobody noticed.
# Pass the old status tree to avoid testing via entries-dump
# as fetching the entries crashes on the invalid db state.
svntest.actions.run_and_verify_update(sbox.wc_dir, expected_output,
None, expected_status)
svntest.actions.run_and_verify_svn('Reverted.*', [], 'revert', '-R',
sbox.wc_dir)
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
# And verify that the state is now valid in both the entries an status world.
svntest.actions.run_and_verify_status(sbox.wc_dir, expected_status)
@Issue(4033)
def upgrade_not_present_replaced(sbox):
"upgrade with not-present replaced nodes"
sbox.build(create_wc=False)
replace_sbox_with_tarfile(sbox, 'upgrade_not_present_replaced.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'd7130b12-92f6-45c9-9217-b9f0472c3fab')
svntest.actions.run_and_verify_svn(None, [], 'relocate',
'file:///tmp/repo', sbox.repo_url,
sbox.wc_dir)
expected_output = svntest.wc.State(sbox.wc_dir, {
'A/B/E' : Item(status='E '),
'A/B/E/alpha' : Item(status='A '),
'A/B/E/beta' : Item(status='A '),
'A/B/lambda' : Item(status='E '),
})
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
svntest.actions.run_and_verify_update(sbox.wc_dir, expected_output,
None, expected_status)
@Issue(4307)
def upgrade_from_1_7_conflict(sbox):
"upgrade from 1.7 WC with conflict (format 29)"
sbox.build(create_wc=False)
replace_sbox_with_tarfile(sbox, 'upgrade_from_1_7_wc.tar.bz2')
# The working copy contains a text conflict, and upgrading such
# a working copy used to cause a pointless 'upgrade required' error.
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
def do_iprops_upgrade(nonrootfile, rootfile, sbox):
wc_dir = sbox.wc_dir
replace_sbox_with_tarfile(sbox, nonrootfile)
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
svntest.actions.run_and_verify_svn(None, [], 'relocate',
'file:///tmp/repo', sbox.repo_url, wc_dir)
expected_output = []
expected_disk = svntest.wc.State('', {
'E' : Item(),
'E/alpha' : Item(contents="This is the file 'alpha'.\n"),
'E/beta' : Item(contents="This is the file 'beta'.\n"),
'F' : Item(),
'lambda' : Item(contents="This is the file 'lambda'.\n"),
})
expected_status = svntest.wc.State(sbox.wc_dir, {
'' : Item(),
'E' : Item(switched='S'),
'E/alpha' : Item(),
'E/beta' : Item(),
'F' : Item(),
'lambda' : Item(),
})
expected_status.tweak(status=' ', wc_rev=2)
# No inherited props after upgrade until an update
expected_iprops = {}
expected_explicit_props = {}
svntest.actions.run_and_verify_inherited_prop_xml(
wc_dir, expected_iprops, expected_explicit_props)
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('E'), expected_iprops, expected_explicit_props)
# Update populates the inherited props
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status)
expected_iprops = {sbox.repo_url : {'p' : 'v'},
sbox.repo_url + '/A' : {'pA' : 'vA'}}
svntest.actions.run_and_verify_inherited_prop_xml(
wc_dir, expected_iprops, expected_explicit_props)
expected_iprops = {sbox.repo_url : {'p' : 'v'},
sbox.repo_url + '/X' : {'pX' : 'vX'}}
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('E'), expected_iprops, expected_explicit_props)
# Now try with a repository root working copy
replace_sbox_with_tarfile(sbox, rootfile)
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
svntest.actions.run_and_verify_svn(None, [], 'relocate',
'file:///tmp/repo', sbox.repo_url, wc_dir)
# Unswitched inherited props available after upgrade
expected_iprops = {wc_dir : {'p' : 'v'},
sbox.ospath('A') : {'pA' : 'vA'}}
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('A/B'), expected_iprops, expected_explicit_props)
# Switched inherited props not populated until update after upgrade
expected_iprops = {}
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('A/B/E'), expected_iprops, expected_explicit_props)
expected_disk = svntest.wc.State('', {
'A' : Item(),
'A/B' : Item(),
'A/B/E' : Item(),
})
expected_status = svntest.wc.State(sbox.wc_dir, {
'' : Item(),
'A' : Item(),
'A/B' : Item(),
'A/B/E' : Item(switched='S'),
})
expected_status.tweak(status=' ', wc_rev=2)
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status)
expected_iprops = {wc_dir : {'p' : 'v'},
sbox.ospath('A') : {'pA' : 'vA'}}
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('A/B'), expected_iprops, expected_explicit_props)
expected_iprops = {sbox.repo_url : {'p' : 'v'},
sbox.repo_url + '/X' : {'pX' : 'vX'}}
expected_explicit_props = {}
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('A/B/E'), expected_iprops, expected_explicit_props)
def iprops_upgrade(sbox):
"inherited properties after upgrade from 1.7"
sbox.build()
sbox.simple_copy('A', 'X')
sbox.simple_propset('p', 'v', '')
sbox.simple_propset('pA', 'vA', 'A')
sbox.simple_propset('pX', 'vX', 'X')
sbox.simple_commit()
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'8f4d0ebe-2ebf-4f62-ad11-804fd88c2382')
do_iprops_upgrade('iprops_upgrade_nonroot.tar.bz2',
'iprops_upgrade_root.tar.bz2',
sbox)
def iprops_upgrade1_6(sbox):
"inherited properties after upgrade from 1.6"
sbox.build()
sbox.simple_copy('A', 'X')
sbox.simple_propset('p', 'v', '')
sbox.simple_propset('pA', 'vA', 'A')
sbox.simple_propset('pX', 'vX', 'X')
sbox.simple_commit()
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'8f4d0ebe-2ebf-4f62-ad11-804fd88c2382')
do_iprops_upgrade('iprops_upgrade_nonroot1_6.tar.bz2',
'iprops_upgrade_root1_6.tar.bz2',
sbox)
def changelist_upgrade_1_6(sbox):
"upgrade from 1.6 with changelist"
sbox.build(create_wc = False)
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'aa4c97bd-2e1a-4e55-a1e5-3db22cff2673')
replace_sbox_with_tarfile(sbox, 'changelist_upgrade_1_6.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
exit_code, output, errput = svntest.main.run_svn(None, 'info', sbox.wc_dir,
'--depth', 'infinity',
'--changelist', 'foo')
paths = [x for x in output if x[:6] == 'Path: ']
expected_paths = ['Path: %s\n' % sbox.ospath('A/D/gamma')]
if paths != expected_paths:
raise svntest.Failure("changelist not matched")
def upgrade_1_7_dir_external(sbox):
"upgrade from 1.7 with dir external"
sbox.build(create_wc = False)
replace_sbox_with_tarfile(sbox, 'upgrade_1_7_dir_external.tar.bz2')
# This fails for 'make check EXCLUSIVE_WC_LOCKS=1' giving an error:
# svn: warning: W200033: sqlite[S5]: database is locked
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
def auto_analyze(sbox):
"""automatic SQLite ANALYZE"""
sbox.build(create_wc = False)
replace_sbox_with_tarfile(sbox, 'wc-without-stat1.tar.bz2')
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'52ec7e4b-e5f0-451d-829f-f05d5571b4ab')
# Don't use svn to do relocate as that will add the table.
val = svntest.wc.sqlite_exec(sbox.wc_dir,
"update repository "
"set root ='" + sbox.repo_url + "'")
val = svntest.wc.sqlite_exec(sbox.wc_dir,
"select 1 from sqlite_master "
"where name = 'sqlite_stat1'")
if val != None:
raise svntest.Failure("initial state failed")
# Make working copy read-only (but not wc_dir itself as
# svntest.main.chmod_tree will not reset it.)
for path, subdirs, files in os.walk(sbox.wc_dir):
for d in subdirs:
os.chmod(os.path.join(path, d), 0555)
for f in files:
os.chmod(os.path.join(path, f), 0444)
state = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
svntest.actions.run_and_verify_status(sbox.wc_dir, state)
svntest.main.chmod_tree(sbox.wc_dir, 0666, 0022)
state = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
svntest.actions.run_and_verify_status(sbox.wc_dir, state)
val = svntest.wc.sqlite_stmt(sbox.wc_dir,
"select 1 from sqlite_master "
"where name = 'sqlite_stat1'")
if val != [(1,)]:
raise svntest.Failure("analyze failed")
########################################################################
# Run the tests
# prop states
#
# .base simple checkout
# .base, .revert delete, copy-here
# .working add, propset
# .base, .working checkout, propset
# .base, .revert, .working delete, copy-here, propset
# .revert, .working delete, add, propset
# .revert delete, add
#
# 1.3.x (f4)
# 1.4.0 (f8, buggy)
# 1.4.6 (f8, fixed)
# list all tests here, starting with None:
test_list = [ None,
basic_upgrade,
upgrade_with_externals,
upgrade_1_5,
update_1_5,
logs_left_1_5,
upgrade_wcprops,
basic_upgrade_1_0,
# Upgrading from 1.4.0-1.4.5 with specific states fails
# See issue #2530
x3_1_4_0,
x3_1_4_6,
x3_1_6_12,
missing_dirs,
missing_dirs2,
delete_and_keep_local,
dirs_only_upgrade,
upgrade_tree_conflict_data,
delete_in_copy_upgrade,
replaced_files,
upgrade_with_scheduled_change,
tree_replace1,
tree_replace2,
upgrade_from_format_28,
depth_exclude,
depth_exclude_2,
add_add_del_del_tc,
add_add_x2,
upgrade_with_missing_subdir,
upgrade_locked,
upgrade_file_externals,
upgrade_missing_replaced,
upgrade_not_present_replaced,
upgrade_from_1_7_conflict,
iprops_upgrade,
iprops_upgrade1_6,
changelist_upgrade_1_6,
upgrade_1_7_dir_external,
auto_analyze,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
* subversion/tests/cmdline/upgrade_tests.py
(auto_analyze): Get SQLite return value where needed.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@1665852 13f79535-47bb-0310-9956-ffa450edef68
#!/usr/bin/env python
#
# upgrade_tests.py: test the working copy upgrade process
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
#
# These tests exercise the upgrade capabilities of 'svn upgrade' as it
# moves working copies between wc-1 and wc-ng.
#
import os
import re
import shutil
import sys
import tarfile
import tempfile
import logging
logger = logging.getLogger()
import svntest
from svntest import wc
Item = svntest.wc.StateItem
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
wc_is_too_old_regex = (".*is too old \(format \d+.*\).*")
def get_current_format():
# Get current format from subversion/libsvn_wc/wc.h
format_file = open(os.path.join(os.path.dirname(__file__), "..", "..", "libsvn_wc", "wc.h")).read()
return int(re.search("\n#define SVN_WC__VERSION (\d+)\n", format_file).group(1))
def replace_sbox_with_tarfile(sbox, tar_filename,
dir=None):
try:
svntest.main.safe_rmtree(sbox.wc_dir)
except OSError, e:
pass
if not dir:
dir = tar_filename.split('.')[0]
tarpath = os.path.join(os.path.dirname(sys.argv[0]), 'upgrade_tests_data',
tar_filename)
t = tarfile.open(tarpath, 'r:bz2')
extract_dir = tempfile.mkdtemp(dir=svntest.main.temp_dir)
for member in t.getmembers():
t.extract(member, extract_dir)
shutil.move(os.path.join(extract_dir, dir), sbox.wc_dir)
def replace_sbox_repo_with_tarfile(sbox, tar_filename, dir=None):
try:
svntest.main.safe_rmtree(sbox.repo_dir)
except OSError, e:
pass
if not dir:
dir = tar_filename.split('.')[0]
tarpath = os.path.join(os.path.dirname(sys.argv[0]), 'upgrade_tests_data',
tar_filename)
t = tarfile.open(tarpath, 'r:bz2')
extract_dir = tempfile.mkdtemp(dir=svntest.main.temp_dir)
for member in t.getmembers():
t.extract(member, extract_dir)
shutil.move(os.path.join(extract_dir, dir), sbox.repo_dir)
def check_format(sbox, expected_format):
dot_svn = svntest.main.get_admin_name()
for root, dirs, files in os.walk(sbox.wc_dir):
db = svntest.sqlite3.connect(os.path.join(root, dot_svn, 'wc.db'))
c = db.cursor()
c.execute('pragma user_version;')
found_format = c.fetchone()[0]
db.close()
if found_format != expected_format:
raise svntest.Failure("found format '%d'; expected '%d'; in wc '%s'" %
(found_format, expected_format, root))
dirs[:] = []
if dot_svn in dirs:
dirs.remove(dot_svn)
def check_pristine(sbox, files):
for file in files:
file_path = sbox.ospath(file)
file_text = open(file_path, 'r').read()
file_pristine = open(svntest.wc.text_base_path(file_path), 'r').read()
if (file_text != file_pristine):
raise svntest.Failure("pristine mismatch for '%s'" % (file))
def check_dav_cache(dir_path, wc_id, expected_dav_caches):
dot_svn = svntest.main.get_admin_name()
db = svntest.sqlite3.connect(os.path.join(dir_path, dot_svn, 'wc.db'))
c = db.cursor()
# Check if python's sqlite can read our db
c.execute('select sqlite_version()')
sqlite_ver = map(int, c.fetchone()[0].split('.'))
# SQLite versions have 3 or 4 number groups
major = sqlite_ver[0]
minor = sqlite_ver[1]
patch = sqlite_ver[2]
if major < 3 or (major == 3 and minor < 6) \
or (major == 3 and minor == 6 and patch < 18):
return # We need a newer SQLite
for local_relpath, expected_dav_cache in expected_dav_caches.items():
# NODES conversion is complete enough that we can use it if it exists
c.execute("""pragma table_info(nodes)""")
if c.fetchone():
c.execute('select dav_cache from nodes ' +
'where wc_id=? and local_relpath=? and op_depth = 0',
(wc_id, local_relpath))
row = c.fetchone()
else:
c.execute('select dav_cache from base_node ' +
'where wc_id=? and local_relpath=?',
(wc_id, local_relpath))
row = c.fetchone()
if row is None:
raise svntest.Failure("no dav cache for '%s'" % (local_relpath))
dav_cache = str(row[0])
if dav_cache != expected_dav_cache:
raise svntest.Failure(
"wrong dav cache for '%s'\n Found: '%s'\n Expected: '%s'" %
(local_relpath, dav_cache, expected_dav_cache))
db.close()
# Very simple working copy property diff handler for single line textual properties
# Should probably be moved to svntest/actions.py after some major refactoring.
def simple_property_verify(dir_path, expected_props):
# Shows all items in dict1 that are not also in dict2
def diff_props(dict1, dict2, name, match):
equal = True;
for key in dict1:
node = dict1[key]
node2 = dict2.get(key, None)
if node2:
for prop in node:
v1 = node[prop]
v2 = node2.get(prop, None)
if not v2:
logger.warn('\'%s\' property on \'%s\' not found in %s',
prop, key, name)
equal = False
if match and v1 != v2:
logger.warn('Expected \'%s\' on \'%s\' to be \'%s\', but found \'%s\'',
prop, key, v1, v2)
equal = False
else:
logger.warn('\'%s\': %s not found in %s', key, dict1[key], name)
equal = False
return equal
exit_code, output, errput = svntest.main.run_svn(None, 'proplist', '-R',
'-v', dir_path)
actual_props = {}
target = None
name = None
for i in output:
if i.startswith('Properties on '):
target = i[15+len(dir_path)+1:-3].replace(os.path.sep, '/')
elif not i.startswith(' '):
name = i.strip()
else:
v = actual_props.get(target, {})
v[name] = i.strip()
actual_props[target] = v
v1 = diff_props(expected_props, actual_props, 'actual', True)
v2 = diff_props(actual_props, expected_props, 'expected', False)
if not v1 or not v2:
logger.warn('Actual properties: %s', actual_props)
raise svntest.Failure("Properties unequal")
def simple_checksum_verify(expected_checksums):
for path, checksum in expected_checksums:
exit_code, output, errput = svntest.main.run_svn(None, 'info', path)
if exit_code:
raise svntest.Failure()
if checksum:
if not svntest.verify.RegexOutput('Checksum: ' + checksum,
match_all=False).matches(output):
raise svntest.Failure("did not get expected checksum " + checksum)
if not checksum:
if svntest.verify.RegexOutput('Checksum: ',
match_all=False).matches(output):
raise svntest.Failure("unexpected checksum")
def run_and_verify_status_no_server(wc_dir, expected_status):
"same as svntest.actions.run_and_verify_status(), but without '-u'"
exit_code, output, errput = svntest.main.run_svn(None, 'st', '-q', '-v',
wc_dir)
actual = svntest.tree.build_tree_from_status(output)
try:
svntest.tree.compare_trees("status", actual, expected_status.old_tree())
except svntest.tree.SVNTreeError:
svntest.verify.display_trees(None, 'STATUS OUTPUT TREE',
expected_status.old_tree(), actual)
logger.warn("ACTUAL STATUS TREE:")
svntest.tree.dump_tree_script(actual, wc_dir + os.sep)
raise
def basic_upgrade(sbox):
"basic upgrade behavior"
replace_sbox_with_tarfile(sbox, 'basic_upgrade.tar.bz2')
# Attempt to use the working copy, this should give an error
svntest.actions.run_and_verify_svn(None, wc_is_too_old_regex,
'info', sbox.wc_dir)
# Upgrade on something anywhere within a versioned subdir gives a
# 'not a working copy root' error. Upgrade on something without any
# versioned parent gives a 'not a working copy' error.
# Both cases use the same error code.
not_wc = ".*(E155007|E155019).*%s'.*not a working copy.*"
os.mkdir(sbox.ospath('X'))
svntest.actions.run_and_verify_svn(None, not_wc % 'X',
'upgrade', sbox.ospath('X'))
# Upgrade on a non-existent subdir within an old WC gives a
# 'not a working copy' error.
svntest.actions.run_and_verify_svn(None, not_wc % 'Y',
'upgrade', sbox.ospath('Y'))
# Upgrade on a versioned file within an old WC gives a
# 'not a working copy' error.
svntest.actions.run_and_verify_svn(None, not_wc % 'mu',
'upgrade', sbox.ospath('A/mu'))
# Upgrade on a versioned dir within an old WC gives a
# 'not a working copy' error.
svntest.actions.run_and_verify_svn(None, not_wc % 'A',
'upgrade', sbox.ospath('A'))
# Now upgrade the working copy
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# Actually check the format number of the upgraded working copy
check_format(sbox, get_current_format())
# Now check the contents of the working copy
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
check_pristine(sbox, ['iota', 'A/mu'])
def upgrade_with_externals(sbox):
"upgrade with externals"
# Create wc from tarfile, uses the same structure of the wc as the tests
# in externals_tests.py.
replace_sbox_with_tarfile(sbox, 'upgrade_with_externals.tar.bz2')
# Attempt to use the working copy, this should give an error
expected_stderr = wc_is_too_old_regex
svntest.actions.run_and_verify_svn(None, expected_stderr,
'info', sbox.wc_dir)
# Now upgrade the working copy
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# Actually check the format number of the upgraded working copy
check_format(sbox, get_current_format())
check_pristine(sbox, ['iota', 'A/mu',
'A/D/x/lambda', 'A/D/x/E/alpha'])
def upgrade_1_5_body(sbox, subcommand):
replace_sbox_with_tarfile(sbox, 'upgrade_1_5.tar.bz2')
# Attempt to use the working copy, this should give an error
expected_stderr = wc_is_too_old_regex
svntest.actions.run_and_verify_svn(None, expected_stderr,
subcommand, sbox.wc_dir)
# Now upgrade the working copy
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# Check the format of the working copy
check_format(sbox, get_current_format())
# Now check the contents of the working copy
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
check_pristine(sbox, ['iota', 'A/mu'])
def upgrade_1_5(sbox):
"test upgrading from a 1.5-era working copy"
return upgrade_1_5_body(sbox, 'info')
def update_1_5(sbox):
"test updating a 1.5-era working copy"
# The 'update' printed:
# Skipped 'svn-test-work\working_copies\upgrade_tests-3'
# Summary of conflicts:
# Skipped paths: 1
return upgrade_1_5_body(sbox, 'update')
def logs_left_1_5(sbox):
"test upgrading from a 1.5-era wc with stale logs"
replace_sbox_with_tarfile(sbox, 'logs_left_1_5.tar.bz2')
# Try to upgrade, this should give an error
expected_stderr = (".*Cannot upgrade with existing logs; .*")
svntest.actions.run_and_verify_svn(None, expected_stderr,
'upgrade', sbox.wc_dir)
def upgrade_wcprops(sbox):
"test upgrading a working copy with wcprops"
replace_sbox_with_tarfile(sbox, 'upgrade_wcprops.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# Make sure that .svn/all-wcprops has disappeared
dot_svn = svntest.main.get_admin_name()
if os.path.exists(os.path.join(sbox.wc_dir, dot_svn, 'all-wcprops')):
raise svntest.Failure("all-wcprops file still exists")
# Just for kicks, let's see if the wcprops are what we'd expect them
# to be. (This could be smarter.)
expected_dav_caches = {
'' :
'(svn:wc:ra_dav:version-url 41 /svn-test-work/local_tmp/repos/!svn/ver/1)',
'iota' :
'(svn:wc:ra_dav:version-url 46 /svn-test-work/local_tmp/repos/!svn/ver/1/iota)',
}
check_dav_cache(sbox.wc_dir, 1, expected_dav_caches)
# Poor mans relocate to fix up an 1.0 (xml style) working copy to refer to a
# valid repository, so svn upgrade can do its work on it
def xml_entries_relocate(path, from_url, to_url):
adm_name = svntest.main.get_admin_name()
entries = os.path.join(path, adm_name, 'entries')
txt = open(entries).read().replace('url="' + from_url, 'url="' + to_url)
os.chmod(entries, 0777)
open(entries, 'w').write(txt)
for dirent in os.listdir(path):
item_path = os.path.join(path, dirent)
if dirent == svntest.main.get_admin_name():
continue
if os.path.isdir(os.path.join(item_path, adm_name)):
xml_entries_relocate(item_path, from_url, to_url)
# Poor mans relocate to fix up an working copy to refer to a
# valid repository, so svn upgrade can do its work on it
def simple_entries_replace(path, from_url, to_url):
adm_name = svntest.main.get_admin_name()
entries = os.path.join(path, adm_name, 'entries')
txt = open(entries).read().replace(from_url, to_url)
os.chmod(entries, 0777)
open(entries, 'wb').write(txt)
for dirent in os.listdir(path):
item_path = os.path.join(path, dirent)
if dirent == svntest.main.get_admin_name():
continue
if os.path.isdir(os.path.join(item_path, adm_name)):
simple_entries_replace(item_path, from_url, to_url)
def basic_upgrade_1_0(sbox):
"test upgrading a working copy created with 1.0.0"
sbox.build(create_wc = False)
replace_sbox_with_tarfile(sbox, 'upgrade_1_0.tar.bz2')
url = sbox.repo_url
# This is non-canonical by the rules of svn_uri_canonicalize, it gets
# written into the entries file and upgrade has to canonicalize.
non_canonical_url = url[:-1] + '%%%02x' % ord(url[-1])
xml_entries_relocate(sbox.wc_dir, 'file:///1.0.0/repos', non_canonical_url)
# Attempt to use the working copy, this should give an error
expected_stderr = wc_is_too_old_regex
svntest.actions.run_and_verify_svn(None, expected_stderr,
'info', sbox.wc_dir)
# Now upgrade the working copy
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# And the separate working copy below COPIED or check_format() fails
svntest.actions.run_and_verify_svn(None, [],
'upgrade',
os.path.join(sbox.wc_dir, 'COPIED', 'G'))
# Actually check the format number of the upgraded working copy
check_format(sbox, get_current_format())
# Now check the contents of the working copy
# #### This working copy is not just a basic tree,
# fix with the right data once we get here
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev=7),
'B' : Item(status=' ', wc_rev='7'),
'B/mu' : Item(status=' ', wc_rev='7'),
'B/D' : Item(status=' ', wc_rev='7'),
'B/D/H' : Item(status=' ', wc_rev='7'),
'B/D/H/psi' : Item(status=' ', wc_rev='7'),
'B/D/H/omega' : Item(status=' ', wc_rev='7'),
'B/D/H/zeta' : Item(status='MM', wc_rev='7'),
'B/D/H/chi' : Item(status=' ', wc_rev='7'),
'B/D/gamma' : Item(status=' ', wc_rev='9'),
'B/D/G' : Item(status=' ', wc_rev='7'),
'B/D/G/tau' : Item(status=' ', wc_rev='7'),
'B/D/G/rho' : Item(status=' ', wc_rev='7'),
'B/D/G/pi' : Item(status=' ', wc_rev='7'),
'B/B' : Item(status=' ', wc_rev='7'),
'B/B/lambda' : Item(status=' ', wc_rev='7'),
'MKDIR' : Item(status='A ', wc_rev='0'),
'MKDIR/MKDIR' : Item(status='A ', wc_rev='0'),
'A' : Item(status=' ', wc_rev='7'),
'A/B' : Item(status=' ', wc_rev='7'),
'A/B/lambda' : Item(status=' ', wc_rev='7'),
'A/D' : Item(status=' ', wc_rev='7'),
'A/D/G' : Item(status=' ', wc_rev='7'),
'A/D/G/rho' : Item(status=' ', wc_rev='7'),
'A/D/G/pi' : Item(status=' ', wc_rev='7'),
'A/D/G/tau' : Item(status=' ', wc_rev='7'),
'A/D/H' : Item(status=' ', wc_rev='7'),
'A/D/H/psi' : Item(status=' ', wc_rev='7'),
'A/D/H/omega' : Item(status=' ', wc_rev='7'),
'A/D/H/zeta' : Item(status=' ', wc_rev='7'),
'A/D/H/chi' : Item(status=' ', wc_rev='7'),
'A/D/gamma' : Item(status=' ', wc_rev='7'),
'A/mu' : Item(status=' ', wc_rev='7'),
'iota' : Item(status=' ', wc_rev='7'),
'COPIED' : Item(status=' ', wc_rev='10'),
'DELETED' : Item(status='D ', wc_rev='10'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
expected_infos = [ {
'Node Kind': 'directory',
'Schedule': 'normal',
'Revision': '7',
'Last Changed Author' : 'Bert',
'Last Changed Rev' : '7'
} ]
svntest.actions.run_and_verify_info(expected_infos, sbox.wc_dir)
expected_infos = [ {
'Node Kind': 'directory',
'Schedule': 'delete',
'Revision': '10',
'Last Changed Author' : 'Bert',
'Last Changed Rev' : '10'
} ]
svntest.actions.run_and_verify_info(expected_infos,
os.path.join(sbox.wc_dir, 'DELETED'))
check_pristine(sbox, ['iota', 'A/mu', 'A/D/H/zeta'])
# Helper function for the x3 tests.
def do_x3_upgrade(sbox, expected_error=[]):
# Attempt to use the working copy, this should give an error
expected_stderr = wc_is_too_old_regex
svntest.actions.run_and_verify_svn(None, expected_stderr,
'info', sbox.wc_dir)
# Now upgrade the working copy
svntest.actions.run_and_verify_svn(None, expected_error,
'upgrade', sbox.wc_dir)
if expected_error != []:
return
# Actually check the format number of the upgraded working copy
check_format(sbox, get_current_format())
# Now check the contents of the working copy
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='2'),
'A' : Item(status=' ', wc_rev='2'),
'A/D' : Item(status=' ', wc_rev='2'),
'A/D/H' : Item(status=' ', wc_rev='2'),
'A/D/H/omega' : Item(status=' ', wc_rev='2'),
'A/D/H/psi' : Item(status='D ', wc_rev='2'),
'A/D/H/new' : Item(status='A ', copied='+', wc_rev='-'),
'A/D/H/chi' : Item(status='R ', copied='+', wc_rev='-'),
'A/D/gamma' : Item(status='D ', wc_rev='2'),
'A/D/G' : Item(status=' ', wc_rev='2'),
'A/B_new' : Item(status='A ', copied='+', wc_rev='-'),
'A/B_new/B' : Item(status='A ', copied='+', wc_rev='-'),
'A/B_new/B/E' : Item(status=' M', copied='+', wc_rev='-'),
'A/B_new/B/E/alpha' : Item(status=' ', copied='+', wc_rev='-'),
'A/B_new/B/E/beta' : Item(status='R ', copied='+', wc_rev='-'),
'A/B_new/B/new' : Item(status='A ', copied='+', wc_rev='-'),
'A/B_new/B/lambda' : Item(status='R ', copied='+', wc_rev='-'),
'A/B_new/B/F' : Item(status=' ', copied='+', wc_rev='-'),
'A/B_new/E' : Item(status=' M', copied='+', wc_rev='-'),
'A/B_new/E/alpha' : Item(status=' M', copied='+', wc_rev='-'),
'A/B_new/E/beta' : Item(status='RM', copied='+', wc_rev='-'),
'A/B_new/lambda' : Item(status='R ', copied='+', wc_rev='-'),
'A/B_new/new' : Item(status='A ', copied='+', wc_rev='-'),
'A/B_new/F' : Item(status=' ', copied='+', wc_rev='-'),
'A/B' : Item(status=' ', wc_rev='2'),
'A/B/E' : Item(status=' ', wc_rev='2'),
'A/B/E/beta' : Item(status='RM', copied='+', wc_rev='-'),
'A/B/E/alpha' : Item(status=' M', wc_rev='2'),
'A/B/F' : Item(status=' ', wc_rev='2'),
'A/B/lambda' : Item(status='R ', copied='+', wc_rev='-'),
'A/B/new' : Item(status='A ', copied='+', wc_rev='-'),
'A/G_new' : Item(status='A ', copied='+', wc_rev='-'),
'A/G_new/rho' : Item(status='R ', copied='+', wc_rev='-'),
'iota' : Item(status=' ', wc_rev='2'),
'A_new' : Item(status='A ', wc_rev='0'),
'A_new/alpha' : Item(status='A ', copied='+', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
simple_property_verify(sbox.wc_dir, {
'A/B_new/E/beta' : {'x3' : '3x',
'svn:eol-style': 'native'},
'A/B/E/beta' : {'s' : 't',
'svn:eol-style': 'native'},
'A/B_new/B/E/alpha' : {'svn:eol-style': 'native'},
'A/B/E/alpha' : {'q': 'r',
'svn:eol-style': 'native'},
'A_new/alpha' : {'svn:eol-style': 'native'},
'A/B_new/B/new' : {'svn:eol-style': 'native'},
'A/B_new/E/alpha' : {'svn:eol-style': 'native',
'u': 'v'},
'A/B_new/B/E' : {'q': 'r'},
'A/B_new/lambda' : {'svn:eol-style': 'native'},
'A/B_new/E' : {'x3': '3x'},
'A/B_new/new' : {'svn:eol-style': 'native'},
'A/B/lambda' : {'svn:eol-style': 'native'},
'A/B_new/B/E/beta' : {'svn:eol-style': 'native'},
'A/B_new/B/lambda' : {'svn:eol-style': 'native'},
'A/B/new' : {'svn:eol-style': 'native'},
'A/G_new/rho' : {'svn:eol-style': 'native'}
})
svntest.actions.run_and_verify_svn('Reverted.*', [],
'revert', '-R', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='2'),
'A' : Item(status=' ', wc_rev='2'),
'A/D' : Item(status=' ', wc_rev='2'),
'A/D/H' : Item(status=' ', wc_rev='2'),
'A/D/H/omega' : Item(status=' ', wc_rev='2'),
'A/D/H/psi' : Item(status=' ', wc_rev='2'),
'A/D/H/chi' : Item(status=' ', wc_rev='2'),
'A/D/gamma' : Item(status=' ', wc_rev='2'),
'A/D/G' : Item(status=' ', wc_rev='2'),
'A/B' : Item(status=' ', wc_rev='2'),
'A/B/F' : Item(status=' ', wc_rev='2'),
'A/B/E' : Item(status=' ', wc_rev='2'),
'A/B/E/beta' : Item(status=' ', wc_rev='2'),
'A/B/E/alpha' : Item(status=' ', wc_rev='2'),
'A/B/lambda' : Item(status=' ', wc_rev='2'),
'iota' : Item(status=' ', wc_rev='2'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
simple_property_verify(sbox.wc_dir, {
'A/B/E/beta' : {'svn:eol-style': 'native'},
# 'A/B/lambda' : {'svn:eol-style': 'native'},
'A/B/E/alpha' : {'svn:eol-style': 'native'}
})
@Issue(2530)
def x3_1_4_0(sbox):
"3x same wc upgrade 1.4.0 test"
replace_sbox_with_tarfile(sbox, 'wc-3x-1.4.0.tar.bz2', dir='wc-1.4.0')
do_x3_upgrade(sbox, expected_error='.*E155016: The properties of.*are in an '
'indeterminate state and cannot be upgraded. See issue #2530.')
@Issue(3811)
def x3_1_4_6(sbox):
"3x same wc upgrade 1.4.6 test"
replace_sbox_with_tarfile(sbox, 'wc-3x-1.4.6.tar.bz2', dir='wc-1.4.6')
do_x3_upgrade(sbox)
@Issue(3811)
def x3_1_6_12(sbox):
"3x same wc upgrade 1.6.12 test"
replace_sbox_with_tarfile(sbox, 'wc-3x-1.6.12.tar.bz2', dir='wc-1.6.12')
do_x3_upgrade(sbox)
def missing_dirs(sbox):
"missing directories and obstructing files"
# tarball wc looks like:
# svn co URL wc
# svn cp wc/A/B wc/A/B_new
# rm -rf wc/A/B/E wc/A/D wc/A/B_new/E wc/A/B_new/F
# touch wc/A/D wc/A/B_new/F
replace_sbox_with_tarfile(sbox, 'missing-dirs.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='1'),
'A' : Item(status=' ', wc_rev='1'),
'A/mu' : Item(status=' ', wc_rev='1'),
'A/C' : Item(status=' ', wc_rev='1'),
'A/D' : Item(status='! ', wc_rev='1'),
'A/B' : Item(status=' ', wc_rev='1'),
'A/B/F' : Item(status=' ', wc_rev='1'),
'A/B/E' : Item(status='! ', wc_rev='1'),
'A/B/lambda' : Item(status=' ', wc_rev='1'),
'iota' : Item(status=' ', wc_rev='1'),
'A/B_new' : Item(status='A ', wc_rev='-', copied='+'),
'A/B_new/E' : Item(status='! ', wc_rev='-'),
'A/B_new/F' : Item(status='! ', wc_rev='-'),
'A/B_new/lambda' : Item(status=' ', wc_rev='-', copied='+'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
def missing_dirs2(sbox):
"missing directories and obstructing dirs"
replace_sbox_with_tarfile(sbox, 'missing-dirs.tar.bz2')
os.remove(sbox.ospath('A/D'))
os.remove(sbox.ospath('A/B_new/F'))
os.mkdir(sbox.ospath('A/D'))
os.mkdir(sbox.ospath('A/B_new/F'))
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='1'),
'A' : Item(status=' ', wc_rev='1'),
'A/mu' : Item(status=' ', wc_rev='1'),
'A/C' : Item(status=' ', wc_rev='1'),
'A/D' : Item(status='! ', wc_rev='1'),
'A/B' : Item(status=' ', wc_rev='1'),
'A/B/F' : Item(status=' ', wc_rev='1'),
'A/B/E' : Item(status='! ', wc_rev='1'),
'A/B/lambda' : Item(status=' ', wc_rev='1'),
'iota' : Item(status=' ', wc_rev='1'),
'A/B_new' : Item(status='A ', wc_rev='-', copied='+'),
'A/B_new/E' : Item(status='! ', wc_rev='-'),
'A/B_new/F' : Item(status='! ', wc_rev='-'),
'A/B_new/lambda' : Item(status=' ', wc_rev='-', copied='+'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3808)
def delete_and_keep_local(sbox):
"check status delete and delete --keep-local"
replace_sbox_with_tarfile(sbox, 'wc-delete.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='0'),
'Normal' : Item(status=' ', wc_rev='1'),
'Deleted-Keep-Local': Item(status='D ', wc_rev='1'),
'Deleted' : Item(status='D ', wc_rev='1'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
# Deleted-Keep-Local should still exist after the upgrade
if not os.path.exists(os.path.join(sbox.wc_dir, 'Deleted-Keep-Local')):
raise svntest.Failure('wc/Deleted-Keep-Local should exist')
# Deleted should be removed after the upgrade as it was
# schedule delete and doesn't contain unversioned changes.
if os.path.exists(os.path.join(sbox.wc_dir, 'Deleted')):
raise svntest.Failure('wc/Deleted should not exist')
def dirs_only_upgrade(sbox):
"upgrade a wc without files"
replace_sbox_with_tarfile(sbox, 'dirs-only.tar.bz2')
expected_output = ["Upgraded '%s'\n" % (sbox.ospath('').rstrip(os.path.sep)),
"Upgraded '%s'\n" % (sbox.ospath('A'))]
svntest.actions.run_and_verify_svn(expected_output, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir, {
'' : Item(status=' ', wc_rev='1'),
'A' : Item(status=' ', wc_rev='1'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
def read_tree_conflict_data(sbox, path):
dot_svn = svntest.main.get_admin_name()
db = svntest.sqlite3.connect(os.path.join(sbox.wc_dir, dot_svn, 'wc.db'))
for row in db.execute("select tree_conflict_data from actual_node "
"where tree_conflict_data is not null "
"and local_relpath = '%s'" % path):
return
raise svntest.Failure("conflict expected for '%s'" % path)
def no_actual_node(sbox, path):
dot_svn = svntest.main.get_admin_name()
db = svntest.sqlite3.connect(os.path.join(sbox.wc_dir, dot_svn, 'wc.db'))
for row in db.execute("select 1 from actual_node "
"where local_relpath = '%s'" % path):
raise svntest.Failure("no actual node expected for '%s'" % path)
def upgrade_tree_conflict_data(sbox):
"upgrade tree conflict data (f20->f21)"
wc_dir = sbox.wc_dir
replace_sbox_with_tarfile(sbox, 'upgrade_tc.tar.bz2')
# Check and see if we can still read our tree conflicts
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.tweak('A/D/G/pi', status='D ', treeconflict='C')
expected_status.tweak('A/D/G/tau', status='! ', treeconflict='C',
wc_rev=None)
expected_status.tweak('A/D/G/rho', status='A ', copied='+',
treeconflict='C', wc_rev='-')
# Look inside pre-upgrade database
read_tree_conflict_data(sbox, 'A/D/G')
no_actual_node(sbox, 'A/D/G/pi')
no_actual_node(sbox, 'A/D/G/rho')
no_actual_node(sbox, 'A/D/G/tau')
# While the upgrade from f20 to f21 will work the upgrade from f22
# to f23 will not, since working nodes are present.
exit_code, output, errput = svntest.main.run_svn('format 22', 'upgrade',
wc_dir)
if not exit_code:
run_and_verify_status_no_server(wc_dir, expected_status)
else:
if not svntest.verify.RegexOutput('.*format 22 with WORKING nodes.*',
match_all=False).matches(errput):
raise svntest.Failure()
# Look insde post-upgrade database
read_tree_conflict_data(sbox, 'A/D/G/pi')
read_tree_conflict_data(sbox, 'A/D/G/rho')
read_tree_conflict_data(sbox, 'A/D/G/tau')
# no_actual_node(sbox, 'A/D/G') ### not removed but should be?
@Issue(3898)
def delete_in_copy_upgrade(sbox):
"upgrade a delete within a copy"
wc_dir = sbox.wc_dir
replace_sbox_with_tarfile(sbox, 'delete-in-copy.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
expected_status.add({
'A/B-copied' : Item(status='A ', copied='+', wc_rev='-'),
'A/B-copied/lambda' : Item(status=' ', copied='+', wc_rev='-'),
'A/B-copied/E' : Item(status='D ', copied='+', wc_rev='-'),
'A/B-copied/E/alpha' : Item(status='D ', copied='+', wc_rev='-'),
'A/B-copied/E/beta' : Item(status='D ', copied='+', wc_rev='-'),
'A/B-copied/F' : Item(status=' ', copied='+', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
svntest.actions.run_and_verify_svn('Reverted.*', [], 'revert', '-R',
sbox.ospath('A/B-copied/E'))
expected_status.tweak('A/B-copied/E',
'A/B-copied/E/alpha',
'A/B-copied/E/beta',
status=' ')
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
simple_checksum_verify([[sbox.ospath('A/B-copied/E/alpha'),
'b347d1da69df9a6a70433ceeaa0d46c8483e8c03']])
def replaced_files(sbox):
"upgrade with base and working replaced files"
wc_dir = sbox.wc_dir
replace_sbox_with_tarfile(sbox, 'replaced-files.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
# A is a checked-out dir containing A/f and A/g, then
# svn cp wc/A wc/B
# svn rm wc/A/f wc/B/f
# svn cp wc/A/g wc/A/f # A/f replaced by copied A/g
# svn cp wc/A/g wc/B/f # B/f replaced by copied A/g (working-only)
# svn rm wc/A/g wc/B/g
# touch wc/A/g wc/B/g
# svn add wc/A/g wc/B/g # A/g replaced, B/g replaced (working-only)
# svn ps pX vX wc/A/g
# svn ps pY vY wc/B/g
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='5'),
'A' : Item(status=' ', wc_rev='5'),
'A/f' : Item(status='R ', wc_rev='-', copied='+'),
'A/g' : Item(status='RM', wc_rev='5'),
'B' : Item(status='A ', wc_rev='-', copied='+'),
'B/f' : Item(status='R ', wc_rev='-', copied='+'),
'B/g' : Item(status='RM', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
simple_property_verify(sbox.wc_dir, {
'A/f' : {'pAg' : 'vAg' },
'A/g' : {'pX' : 'vX' },
'B/f' : {'pAg' : 'vAg' },
'B/g' : {'pY' : 'vY' },
})
simple_checksum_verify([
[sbox.ospath('A/f'), '395dfb603d8a4e0348d0b082803f2b7426c76eb9'],
[sbox.ospath('A/g'), None],
[sbox.ospath('B/f'), '395dfb603d8a4e0348d0b082803f2b7426c76eb9'],
[sbox.ospath('B/g'), None]])
svntest.actions.run_and_verify_svn('Reverted.*', [], 'revert',
sbox.ospath('A/f'), sbox.ospath('B/f'),
sbox.ospath('A/g'), sbox.ospath('B/g'))
simple_property_verify(sbox.wc_dir, {
'A/f' : {'pAf' : 'vAf' },
'A/g' : {'pAg' : 'vAg' },
'B/f' : {'pAf' : 'vAf' },
'B/g' : {'pAg' : 'vAg' },
})
simple_checksum_verify([
[sbox.ospath('A/f'), '958eb2d755df2d9e0de6f7b835aec16b64d83f6f'],
[sbox.ospath('A/g'), '395dfb603d8a4e0348d0b082803f2b7426c76eb9'],
[sbox.ospath('B/f'), '958eb2d755df2d9e0de6f7b835aec16b64d83f6f'],
[sbox.ospath('B/g'), '395dfb603d8a4e0348d0b082803f2b7426c76eb9']])
def upgrade_with_scheduled_change(sbox):
"upgrade 1.6.x wc with a scheduled change"
replace_sbox_with_tarfile(sbox, 'upgrade_with_scheduled_change.tar.bz2')
svntest.actions.run_and_verify_svn(None, [],
'upgrade', sbox.wc_dir)
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
expected_status.add({
'A/scheduled_file_1' : Item(status='A ', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3777)
def tree_replace1(sbox):
"upgrade 1.6 with tree replaced"
replace_sbox_with_tarfile(sbox, 'tree-replace1.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' M', wc_rev=17),
'B' : Item(status='R ', copied='+', wc_rev='-'),
'B/f' : Item(status=' ', copied='+', wc_rev='-'),
'B/g' : Item(status='D ', wc_rev=17),
'B/h' : Item(status=' ', copied='+', wc_rev='-'),
'B/C' : Item(status=' ', copied='+', wc_rev='-'),
'B/C/f' : Item(status=' ', copied='+', wc_rev='-'),
'B/D' : Item(status='D ', wc_rev=17),
'B/D/f' : Item(status='D ', wc_rev=17),
'B/E' : Item(status=' ', copied='+', wc_rev='-'),
'B/E/f' : Item(status=' ', copied='+', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3777)
def tree_replace2(sbox):
"upgrade 1.6 with tree replaced (2)"
replace_sbox_with_tarfile(sbox, 'tree-replace2.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' M', wc_rev=12),
'B' : Item(status='R ', copied='+', wc_rev='-'),
'B/f' : Item(status='D ', wc_rev=12),
'B/D' : Item(status='D ', wc_rev=12),
'B/g' : Item(status=' ', copied='+', wc_rev='-'),
'B/E' : Item(status=' ', copied='+', wc_rev='-'),
'C' : Item(status='R ', copied='+', wc_rev='-'),
'C/f' : Item(status=' ', copied='+', wc_rev='-'),
'C/D' : Item(status=' ', copied='+', wc_rev='-'),
'C/g' : Item(status='D ', wc_rev=12),
'C/E' : Item(status='D ', wc_rev=12),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
def upgrade_from_format_28(sbox):
"""upgrade from format 28: rename pristines"""
# Start with a format-28 WC that is a clean checkout of the Greek tree.
replace_sbox_with_tarfile(sbox, 'format_28.tar.bz2')
# Get the old and new pristine file paths for file 'iota'.
checksum = '2c0aa9014a0cd07f01795a333d82485ef6d083e2'
old_pristine_path = os.path.join(sbox.wc_dir, svntest.main.get_admin_name(),
'pristine', checksum[0:2], checksum)
new_pristine_path = old_pristine_path + '.svn-base'
assert os.path.exists(old_pristine_path)
assert not os.path.exists(new_pristine_path)
# Upgrade the WC
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
assert not os.path.exists(old_pristine_path)
assert os.path.exists(new_pristine_path)
@Issue(3901)
def depth_exclude(sbox):
"upgrade 1.6.x wc that has depth=exclude"
replace_sbox_with_tarfile(sbox, 'depth_exclude.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='1'),
'A' : Item(status=' ', wc_rev='1'),
'X' : Item(status='A ', copied='+', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3901)
def depth_exclude_2(sbox):
"1.6.x wc that has depth=exclude inside a delete"
replace_sbox_with_tarfile(sbox, 'depth_exclude_2.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='1'),
'A' : Item(status='D ', wc_rev='1'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3916)
def add_add_del_del_tc(sbox):
"wc with add-add and del-del tree conflicts"
replace_sbox_with_tarfile(sbox, 'add_add_del_del_tc.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='4'),
'A' : Item(status=' ', wc_rev='4'),
'A/B' : Item(status='A ', treeconflict='C', copied='+', wc_rev='-'),
'X' : Item(status=' ', wc_rev='3'),
'X/Y' : Item(status='! ', treeconflict='C')
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3916)
def add_add_x2(sbox):
"wc with 2 tree conflicts in same entry"
replace_sbox_with_tarfile(sbox, 'add_add_x2.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev='3'),
'A' : Item(status=' ', wc_rev='3'),
'A/X' : Item(status='A ', treeconflict='C', copied='+', wc_rev='-'),
'A/Y' : Item(status='A ', treeconflict='C', copied='+', wc_rev='-'),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3940)
def upgrade_with_missing_subdir(sbox):
"test upgrading a working copy with missing subdir"
sbox.build(create_wc = False)
replace_sbox_with_tarfile(sbox, 'basic_upgrade.tar.bz2')
simple_entries_replace(sbox.wc_dir,
'file:///Users/Hyrum/dev/test/greek-1.6.repo',
sbox.repo_url)
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'cafefeed-babe-face-dead-beeff00dfade')
url = sbox.repo_url
wc_dir = sbox.wc_dir
# Attempt to use the working copy, this should give an error
expected_stderr = wc_is_too_old_regex
svntest.actions.run_and_verify_svn(None, expected_stderr,
'info', sbox.wc_dir)
# Now remove a subdirectory
svntest.main.safe_rmtree(sbox.ospath('A/B'))
# Now upgrade the working copy and expect a missing subdir
expected_output = svntest.verify.UnorderedOutput([
"Upgraded '%s'\n" % sbox.wc_dir,
"Upgraded '%s'\n" % sbox.ospath('A'),
"Skipped '%s'\n" % sbox.ospath('A/B'),
"Upgraded '%s'\n" % sbox.ospath('A/C'),
"Upgraded '%s'\n" % sbox.ospath('A/D'),
"Upgraded '%s'\n" % sbox.ospath('A/D/G'),
"Upgraded '%s'\n" % sbox.ospath('A/D/H'),
])
svntest.actions.run_and_verify_svn(expected_output, [],
'upgrade', sbox.wc_dir)
# And now perform an update. (This used to fail with an assertion)
expected_output = svntest.wc.State(wc_dir, {
'A/B' : Item(verb='Restored'),
'A/B/E' : Item(status='A '),
'A/B/E/alpha' : Item(status='A '),
'A/B/E/beta' : Item(status='A '),
'A/B/lambda' : Item(status='A '),
'A/B/F' : Item(status='A '),
})
expected_disk = svntest.main.greek_state.copy()
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
# Do the update and check the results in three ways.
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status)
@Issue(3994)
def upgrade_locked(sbox):
"upgrade working copy with locked files"
replace_sbox_with_tarfile(sbox, 'upgrade_locked.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
expected_status = svntest.wc.State(sbox.wc_dir,
{
'' : Item(status=' ', wc_rev=1),
'A' : Item(status='D ', wc_rev=2),
'A/third' : Item(status='D ', writelocked='K', wc_rev=2),
'other' : Item(status='D ', writelocked='K', wc_rev=4),
'iota' : Item(status=' ', writelocked='K', wc_rev=3),
})
run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(4015)
def upgrade_file_externals(sbox):
"upgrade with file externals"
sbox.build()
replace_sbox_with_tarfile(sbox, 'upgrade_file_externals.tar.bz2')
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'07146bbd-0b64-4aaf-ab70-cd76a0df2d41')
expected_output = svntest.verify.RegexOutput('r2 committed.*')
svntest.actions.run_and_verify_svnmucc(expected_output, [],
'-m', 'r2',
'propset', 'svn:externals',
'^/A/B/E EX\n^/A/mu muX',
sbox.repo_url + '/A/B/F')
expected_output = svntest.verify.RegexOutput('r3 committed.*')
svntest.actions.run_and_verify_svnmucc(expected_output, [],
'-m', 'r3',
'propset', 'svn:externals',
'^/A/B/F FX\n^/A/B/lambda lambdaX',
sbox.repo_url + '/A/C')
expected_output = svntest.verify.RegexOutput('r4 committed.*')
svntest.actions.run_and_verify_svnmucc(expected_output, [],
'-m', 'r4',
'propset', 'pname1', 'pvalue1',
sbox.repo_url + '/A/mu',
'propset', 'pname2', 'pvalue2',
sbox.repo_url + '/A/B/lambda',
'propset', 'pname3', 'pvalue3',
sbox.repo_url + '/A/B/E/alpha')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
svntest.actions.run_and_verify_svn(None, [], 'relocate',
'file:///tmp/repo', sbox.repo_url,
sbox.wc_dir)
expected_output = svntest.wc.State(sbox.wc_dir, {
'A/mu' : Item(status=' U'),
'A/B/lambda' : Item(status=' U'),
'A/B/E/alpha' : Item(status=' U'),
'A/C/FX/EX/alpha' : Item(status=' U'),
'A/C/FX/muX' : Item(status=' U'),
'A/C/lambdaX' : Item(status=' U'),
'A/B/F/EX/alpha' : Item(status=' U'),
'A/B/F/muX' : Item(status=' U'),
})
svntest.actions.run_and_verify_update(sbox.wc_dir, expected_output,
None, None)
### simple_property_verify only sees last line of multi-line
### property values such as svn:externals
simple_property_verify(sbox.wc_dir, {
'A/mu' : {'pname1' : 'pvalue1' },
'A/B/lambda' : {'pname2' : 'pvalue2' },
'A/B/E/alpha' : {'pname3' : 'pvalue3' },
'A/B/F' : {'svn:externals' : '^/A/mu muX'},
'A/C' : {'svn:externals' : '^/A/B/lambda lambdaX'},
'A/B/F/muX' : {'pname1' : 'pvalue1' },
'A/C/lambdaX' : {'pname2' : 'pvalue2' },
})
simple_property_verify(sbox.ospath('A/C/FX'), {
'' : {'svn:externals' : '^/A/mu muX'},
'muX' : {'pname1' : 'pvalue1' },
})
simple_property_verify(sbox.ospath('A/C/FX/EX'), {
'alpha' : {'pname3' : 'pvalue3' },
})
@Issue(4035)
def upgrade_missing_replaced(sbox):
"upgrade with missing replaced dir"
sbox.build(create_wc=False)
replace_sbox_with_tarfile(sbox, 'upgrade_missing_replaced.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'd7130b12-92f6-45c9-9217-b9f0472c3fab')
svntest.actions.run_and_verify_svn(None, [], 'relocate',
'file:///tmp/repo', sbox.repo_url,
sbox.wc_dir)
expected_output = svntest.wc.State(sbox.wc_dir, {
'A/B/E' : Item(status=' ', treeconflict='C',
prev_verb='Restored'),
'A/B/E/alpha' : Item(status=' ', treeconflict='A'),
'A/B/E/beta' : Item(status=' ', treeconflict='A'),
})
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
expected_status.tweak('A/B/E', status='! ', treeconflict='C', wc_rev='-',
entry_status='R ', entry_rev='1')
expected_status.tweak('A/B/E/alpha', 'A/B/E/beta', status='D ')
# This upgrade installs an INCOMPLETE node in WORKING for E, which makes the
# database technically invalid... but we did that for 1.7 and nobody noticed.
# Pass the old status tree to avoid testing via entries-dump
# as fetching the entries crashes on the invalid db state.
svntest.actions.run_and_verify_update(sbox.wc_dir, expected_output,
None, expected_status)
svntest.actions.run_and_verify_svn('Reverted.*', [], 'revert', '-R',
sbox.wc_dir)
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
# And verify that the state is now valid in both the entries an status world.
svntest.actions.run_and_verify_status(sbox.wc_dir, expected_status)
@Issue(4033)
def upgrade_not_present_replaced(sbox):
"upgrade with not-present replaced nodes"
sbox.build(create_wc=False)
replace_sbox_with_tarfile(sbox, 'upgrade_not_present_replaced.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'd7130b12-92f6-45c9-9217-b9f0472c3fab')
svntest.actions.run_and_verify_svn(None, [], 'relocate',
'file:///tmp/repo', sbox.repo_url,
sbox.wc_dir)
expected_output = svntest.wc.State(sbox.wc_dir, {
'A/B/E' : Item(status='E '),
'A/B/E/alpha' : Item(status='A '),
'A/B/E/beta' : Item(status='A '),
'A/B/lambda' : Item(status='E '),
})
expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
svntest.actions.run_and_verify_update(sbox.wc_dir, expected_output,
None, expected_status)
@Issue(4307)
def upgrade_from_1_7_conflict(sbox):
"upgrade from 1.7 WC with conflict (format 29)"
sbox.build(create_wc=False)
replace_sbox_with_tarfile(sbox, 'upgrade_from_1_7_wc.tar.bz2')
# The working copy contains a text conflict, and upgrading such
# a working copy used to cause a pointless 'upgrade required' error.
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
def do_iprops_upgrade(nonrootfile, rootfile, sbox):
wc_dir = sbox.wc_dir
replace_sbox_with_tarfile(sbox, nonrootfile)
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
svntest.actions.run_and_verify_svn(None, [], 'relocate',
'file:///tmp/repo', sbox.repo_url, wc_dir)
expected_output = []
expected_disk = svntest.wc.State('', {
'E' : Item(),
'E/alpha' : Item(contents="This is the file 'alpha'.\n"),
'E/beta' : Item(contents="This is the file 'beta'.\n"),
'F' : Item(),
'lambda' : Item(contents="This is the file 'lambda'.\n"),
})
expected_status = svntest.wc.State(sbox.wc_dir, {
'' : Item(),
'E' : Item(switched='S'),
'E/alpha' : Item(),
'E/beta' : Item(),
'F' : Item(),
'lambda' : Item(),
})
expected_status.tweak(status=' ', wc_rev=2)
# No inherited props after upgrade until an update
expected_iprops = {}
expected_explicit_props = {}
svntest.actions.run_and_verify_inherited_prop_xml(
wc_dir, expected_iprops, expected_explicit_props)
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('E'), expected_iprops, expected_explicit_props)
# Update populates the inherited props
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status)
expected_iprops = {sbox.repo_url : {'p' : 'v'},
sbox.repo_url + '/A' : {'pA' : 'vA'}}
svntest.actions.run_and_verify_inherited_prop_xml(
wc_dir, expected_iprops, expected_explicit_props)
expected_iprops = {sbox.repo_url : {'p' : 'v'},
sbox.repo_url + '/X' : {'pX' : 'vX'}}
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('E'), expected_iprops, expected_explicit_props)
# Now try with a repository root working copy
replace_sbox_with_tarfile(sbox, rootfile)
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
svntest.actions.run_and_verify_svn(None, [], 'relocate',
'file:///tmp/repo', sbox.repo_url, wc_dir)
# Unswitched inherited props available after upgrade
expected_iprops = {wc_dir : {'p' : 'v'},
sbox.ospath('A') : {'pA' : 'vA'}}
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('A/B'), expected_iprops, expected_explicit_props)
# Switched inherited props not populated until update after upgrade
expected_iprops = {}
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('A/B/E'), expected_iprops, expected_explicit_props)
expected_disk = svntest.wc.State('', {
'A' : Item(),
'A/B' : Item(),
'A/B/E' : Item(),
})
expected_status = svntest.wc.State(sbox.wc_dir, {
'' : Item(),
'A' : Item(),
'A/B' : Item(),
'A/B/E' : Item(switched='S'),
})
expected_status.tweak(status=' ', wc_rev=2)
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status)
expected_iprops = {wc_dir : {'p' : 'v'},
sbox.ospath('A') : {'pA' : 'vA'}}
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('A/B'), expected_iprops, expected_explicit_props)
expected_iprops = {sbox.repo_url : {'p' : 'v'},
sbox.repo_url + '/X' : {'pX' : 'vX'}}
expected_explicit_props = {}
svntest.actions.run_and_verify_inherited_prop_xml(
sbox.ospath('A/B/E'), expected_iprops, expected_explicit_props)
def iprops_upgrade(sbox):
"inherited properties after upgrade from 1.7"
sbox.build()
sbox.simple_copy('A', 'X')
sbox.simple_propset('p', 'v', '')
sbox.simple_propset('pA', 'vA', 'A')
sbox.simple_propset('pX', 'vX', 'X')
sbox.simple_commit()
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'8f4d0ebe-2ebf-4f62-ad11-804fd88c2382')
do_iprops_upgrade('iprops_upgrade_nonroot.tar.bz2',
'iprops_upgrade_root.tar.bz2',
sbox)
def iprops_upgrade1_6(sbox):
"inherited properties after upgrade from 1.6"
sbox.build()
sbox.simple_copy('A', 'X')
sbox.simple_propset('p', 'v', '')
sbox.simple_propset('pA', 'vA', 'A')
sbox.simple_propset('pX', 'vX', 'X')
sbox.simple_commit()
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'8f4d0ebe-2ebf-4f62-ad11-804fd88c2382')
do_iprops_upgrade('iprops_upgrade_nonroot1_6.tar.bz2',
'iprops_upgrade_root1_6.tar.bz2',
sbox)
def changelist_upgrade_1_6(sbox):
"upgrade from 1.6 with changelist"
sbox.build(create_wc = False)
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'aa4c97bd-2e1a-4e55-a1e5-3db22cff2673')
replace_sbox_with_tarfile(sbox, 'changelist_upgrade_1_6.tar.bz2')
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
exit_code, output, errput = svntest.main.run_svn(None, 'info', sbox.wc_dir,
'--depth', 'infinity',
'--changelist', 'foo')
paths = [x for x in output if x[:6] == 'Path: ']
expected_paths = ['Path: %s\n' % sbox.ospath('A/D/gamma')]
if paths != expected_paths:
raise svntest.Failure("changelist not matched")
def upgrade_1_7_dir_external(sbox):
"upgrade from 1.7 with dir external"
sbox.build(create_wc = False)
replace_sbox_with_tarfile(sbox, 'upgrade_1_7_dir_external.tar.bz2')
# This fails for 'make check EXCLUSIVE_WC_LOCKS=1' giving an error:
# svn: warning: W200033: sqlite[S5]: database is locked
svntest.actions.run_and_verify_svn(None, [], 'upgrade', sbox.wc_dir)
def auto_analyze(sbox):
"""automatic SQLite ANALYZE"""
sbox.build(create_wc = False)
replace_sbox_with_tarfile(sbox, 'wc-without-stat1.tar.bz2')
svntest.main.run_svnadmin('setuuid', sbox.repo_dir,
'52ec7e4b-e5f0-451d-829f-f05d5571b4ab')
# Don't use svn to do relocate as that will add the table.
svntest.wc.sqlite_exec(sbox.wc_dir,
"update repository "
"set root ='" + sbox.repo_url + "'")
val = svntest.wc.sqlite_stmt(sbox.wc_dir,
"select 1 from sqlite_master "
"where name = 'sqlite_stat1'")
if val != []:
raise svntest.Failure("initial state failed")
# Make working copy read-only (but not wc_dir itself as
# svntest.main.chmod_tree will not reset it.)
for path, subdirs, files in os.walk(sbox.wc_dir):
for d in subdirs:
os.chmod(os.path.join(path, d), 0555)
for f in files:
os.chmod(os.path.join(path, f), 0444)
state = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
svntest.actions.run_and_verify_status(sbox.wc_dir, state)
svntest.main.chmod_tree(sbox.wc_dir, 0666, 0022)
state = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
svntest.actions.run_and_verify_status(sbox.wc_dir, state)
val = svntest.wc.sqlite_stmt(sbox.wc_dir,
"select 1 from sqlite_master "
"where name = 'sqlite_stat1'")
if val != [(1,)]:
raise svntest.Failure("analyze failed")
########################################################################
# Run the tests
# prop states
#
# .base simple checkout
# .base, .revert delete, copy-here
# .working add, propset
# .base, .working checkout, propset
# .base, .revert, .working delete, copy-here, propset
# .revert, .working delete, add, propset
# .revert delete, add
#
# 1.3.x (f4)
# 1.4.0 (f8, buggy)
# 1.4.6 (f8, fixed)
# list all tests here, starting with None:
test_list = [ None,
basic_upgrade,
upgrade_with_externals,
upgrade_1_5,
update_1_5,
logs_left_1_5,
upgrade_wcprops,
basic_upgrade_1_0,
# Upgrading from 1.4.0-1.4.5 with specific states fails
# See issue #2530
x3_1_4_0,
x3_1_4_6,
x3_1_6_12,
missing_dirs,
missing_dirs2,
delete_and_keep_local,
dirs_only_upgrade,
upgrade_tree_conflict_data,
delete_in_copy_upgrade,
replaced_files,
upgrade_with_scheduled_change,
tree_replace1,
tree_replace2,
upgrade_from_format_28,
depth_exclude,
depth_exclude_2,
add_add_del_del_tc,
add_add_x2,
upgrade_with_missing_subdir,
upgrade_locked,
upgrade_file_externals,
upgrade_missing_replaced,
upgrade_not_present_replaced,
upgrade_from_1_7_conflict,
iprops_upgrade,
iprops_upgrade1_6,
changelist_upgrade_1_6,
upgrade_1_7_dir_external,
auto_analyze,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import json
from superdesk.io.registry import register_feeding_service, register_feeding_service_parser
from superdesk.io.feeding_services.http_base_service import HTTPFeedingServiceBase
from superdesk.errors import IngestApiError, SuperdeskIngestError
import requests
import superdesk
from superdesk.io.feed_parsers import nitf
from lxml import etree
logger = logging.getLogger(__name__)
class APMediaFeedingService(HTTPFeedingServiceBase):
"""
Feeding Service class which can retrieve articles from Associated Press Media API
"""
NAME = 'ap media api'
label = 'AP Media API'
fields = [
{
'id': 'api_url', 'type': 'text', 'label': 'AP Media API URL',
'required': True, 'default_value': 'https://api.ap.org/media/v/content/feed'
},
{
'id': 'products_url', 'type': 'text', 'label': 'AP Media API Products URL',
'required': True, 'default_value': 'https://api.ap.org/media/v/account/plans'
},
{
'id': 'apikey', 'type': 'text', 'label': 'API Key',
'placeholder': 'API key for access to the API', 'required': True
},
{
'id': 'productList', 'type': 'text', 'label': 'Product List',
'placeholder': 'Use coma separated product id''s for multiple products, empty for all ', 'required': False
},
{
'id': 'availableProducts', 'type': 'text', 'label': 'All Available Products',
'readonly': True
},
{
'id': 'next_link', 'type': 'text', 'label': 'Next Link',
'readonly': True
}
]
HTTP_AUTH = False
HTTP_TIMEOUT = 40.0
def config_test(self, provider=None):
self._get_products(provider)
original = superdesk.get_resource_service('ingest_providers').find_one(req=None, _id=provider.get('_id'))
# If there has been a change in the required products then reset the next link
if original and original.get('config', {}).get('productList', '') != provider.get('config', {}).get(
'productList', ''):
provider['config']['next_link'] = None
def _get_products(self, provider):
"""
Get the products that are available for the API Key, effectively ensuring that the key is valid and provide an
indication of the product codes available in the UI.
:param provider:
:return:
"""
api_key = provider.get('config', {}).get('apikey')
r = requests.get(provider.get('config', {}).get('products_url') + '?apikey={}'.format(api_key),
timeout=self.HTTP_TIMEOUT, verify=False, allow_redirects=True)
r.raise_for_status()
productList = []
products = json.loads(r.text)
for plan in products.get('data', {}).get('plans'):
for entitlement in plan.get('entitlements'):
productList.append('{}'.format(entitlement.get('id')))
provider['config']['availableProducts'] = ','.join(productList)
def prepare_href(self, href, mimetype=None):
href = href + '&apikey=' + self.provider.get('config', {}).get('apikey') if '?' in href else \
href + '?apikey=' + self.provider.get('config', {}).get('apikey')
return href
def _update(self, provider, update):
self.HTTP_URL = provider.get('config', {}).get('api_url', '')
self.provider = provider
# Set the apikey parameter we're going to use it on all calls
params = dict()
params['apikey'] = provider.get('config', {}).get('apikey')
# Use the next link if one is available in the config
if provider.get('config', {}).get('next_link'):
r = self.get_url(url=provider.get('config', {}).get('next_link'), params=params,
verify=False, allow_redirects=True)
r.raise_for_status()
else:
id_list = provider.get('config', {}).get('productList', '').strip()
# If there has been a list of products defined then we format them for the request, if not all
# allowed products will be returned.
if id_list:
# we remove spaces and empty values from id_list to do a clean list
id_list = ' OR '.join([id_.strip() for id_ in id_list.split(',') if id_.strip()])
params['q'] = 'productid:(' + id_list + ') AND versioncreated:>now-1H'
else:
params['q'] = 'versioncreated:>now-1H'
params['page_size'] = '100'
r = self.get_url(params=params, verify=False, allow_redirects=True)
r.raise_for_status()
try:
response = json.loads(r.text)
except Exception:
raise IngestApiError.apiRequestError(Exception('error parsing response'))
nextLink = response.get('data', {}).get('next_page')
# Got the same next link as last time so nothing new
if nextLink == provider.get('config', {}).get('next_link'):
logger.info('Nothing new from AP Media')
return []
parser = self.get_feed_parser(provider)
parsed_items = []
for item in response.get('data', {}).get('items', []):
try:
# Get the item meta data
r = self.get_url(url=item.get('item', {}).get('uri'),
params={'apikey': provider.get('config', {}).get('apikey')}, verify=False,
allow_redirects=True)
r.raise_for_status()
complete_item = json.loads(r.text)
# Get the nitf rendition of the item
nitf_ref = complete_item.get('data', {}).get('item', {}).get('renditions', {}).get('nitf', {}).get(
'href')
if nitf_ref:
r = self.get_url(url=nitf_ref, params={'apikey': provider.get('config', {}).get('apikey')},
verify=False, allow_redirects=True)
r.raise_for_status()
root_elt = etree.fromstring(r.content)
nitf_item = nitf.NITFFeedParser().parse(root_elt)
complete_item['nitf'] = nitf_item
parsed_items.append(parser.parse(complete_item, provider))
# Any exception processing an indivisual item is swallowed
except Exception as ex:
logger.exception(ex)
# Save the link for next time
upd_provider = provider.get('config')
upd_provider['next_link'] = nextLink
update['config'] = upd_provider
return [parsed_items]
register_feeding_service(APMediaFeedingService)
[SDESK-4814] fix the feed query, request all versions, allow point in time recovery (#1725)
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import json
from superdesk.io.registry import register_feeding_service, register_feeding_service_parser
from superdesk.io.feeding_services.http_base_service import HTTPFeedingServiceBase
from superdesk.errors import IngestApiError, SuperdeskIngestError
import requests
import superdesk
from superdesk.io.feed_parsers import nitf
from lxml import etree
from superdesk.utc import utcnow
from datetime import timedelta, datetime
logger = logging.getLogger(__name__)
class APMediaFeedingService(HTTPFeedingServiceBase):
"""
Feeding Service class which can retrieve articles from Associated Press Media API
"""
NAME = 'ap media api'
label = 'AP Media API'
fields = [
{
'id': 'api_url', 'type': 'text', 'label': 'AP Media API URL',
'required': True, 'default_value': 'https://api.ap.org/media/v/content/feed'
},
{
'id': 'products_url', 'type': 'text', 'label': 'AP Media API Products URL',
'required': True, 'default_value': 'https://api.ap.org/media/v/account/plans'
},
{
'id': 'apikey', 'type': 'text', 'label': 'API Key',
'placeholder': 'API key for access to the API', 'required': True
},
{
'id': 'productList', 'type': 'text', 'label': 'Product List',
'placeholder': 'Use coma separated product id''s for multiple products, empty for all ', 'required': False
},
{
'id': 'availableProducts', 'type': 'text', 'label': 'All Available Products',
'readonly': True
},
{
'id': 'next_link', 'type': 'text', 'label': 'Next Link',
'readonly': True
},
{
'id': 'recoverytime', 'type': 'text', 'label': 'Number of hours to recover', 'default_value': '',
'placeholder': 'Specifying a number will restart the feed from that number of hours in the past'
}
]
HTTP_AUTH = False
HTTP_TIMEOUT = 40.0
def config_test(self, provider=None):
self._get_products(provider)
original = superdesk.get_resource_service('ingest_providers').find_one(req=None, _id=provider.get('_id'))
# If there has been a change in the required products then reset the next link
if original and (original.get('config', {}).get('productList', '') != provider.get('config', {}).get(
'productList', '') or original.get('config', {}).get('recoverytime', '') !=
provider.get('config', {}).get('recoverytime', '')):
provider['config']['next_link'] = None
def _get_products(self, provider):
"""
Get the products that are available for the API Key, effectively ensuring that the key is valid and provide an
indication of the product codes available in the UI.
:param provider:
:return:
"""
api_key = provider.get('config', {}).get('apikey')
r = requests.get(provider.get('config', {}).get('products_url') + '?apikey={}'.format(api_key),
timeout=self.HTTP_TIMEOUT, verify=False, allow_redirects=True)
r.raise_for_status()
productList = []
products = json.loads(r.text)
for plan in products.get('data', {}).get('plans'):
for entitlement in plan.get('entitlements'):
productList.append('{}'.format(entitlement.get('id')))
provider['config']['availableProducts'] = ','.join(productList)
def prepare_href(self, href, mimetype=None):
href = href + '&apikey=' + self.provider.get('config', {}).get('apikey') if '?' in href else \
href + '?apikey=' + self.provider.get('config', {}).get('apikey')
return href
def _update(self, provider, update):
self.HTTP_URL = provider.get('config', {}).get('api_url', '')
self.provider = provider
# Set the apikey parameter we're going to use it on all calls
params = dict()
params['apikey'] = provider.get('config', {}).get('apikey')
# Use the next link if one is available in the config
if provider.get('config', {}).get('next_link'):
r = self.get_url(url=provider.get('config', {}).get('next_link'), params=params,
verify=False, allow_redirects=True)
r.raise_for_status()
else:
id_list = provider.get('config', {}).get('productList', '').strip()
recovery_time = provider.get('config', {}).get('recoverytime', '1')
recovery_time = recovery_time.strip() if recovery_time else ''
if recovery_time == '':
recovery_time = '1'
start = datetime.strftime(utcnow() - timedelta(hours=int(recovery_time)), '%Y-%m-%dT%H:%M:%SZ')
# If there has been a list of products defined then we format them for the request, if not all
# allowed products will be returned.
if id_list:
# we remove spaces and empty values from id_list to do a clean list
id_list = ' OR '.join([id_.strip() for id_ in id_list.split(',') if id_.strip()])
params['q'] = 'productid:(' + id_list + ') AND mindate:>{}'.format(start)
else:
params['q'] = 'mindate:>{}'.format(start)
params['page_size'] = '100'
params['versions'] = 'all'
logger.info('AP Media Start/Recovery time: {} params {}'.format(recovery_time, params))
r = self.get_url(params=params, verify=False, allow_redirects=True)
r.raise_for_status()
try:
response = json.loads(r.text)
except Exception:
raise IngestApiError.apiRequestError(Exception('error parsing response'))
nextLink = response.get('data', {}).get('next_page')
# Got the same next link as last time so nothing new
if nextLink == provider.get('config', {}).get('next_link'):
logger.info('Nothing new from AP Media')
return []
parser = self.get_feed_parser(provider)
parsed_items = []
for item in response.get('data', {}).get('items', []):
try:
# Get the item meta data
r = self.get_url(url=item.get('item', {}).get('uri'),
params={'apikey': provider.get('config', {}).get('apikey')}, verify=False,
allow_redirects=True)
logger.info('Get AP meta data for "{}" uri: {}'.format(item.get('item', {}).get('headline'),
item.get('item', {}).get('uri')))
r.raise_for_status()
complete_item = json.loads(r.text)
# Get the nitf rendition of the item
nitf_ref = complete_item.get('data', {}).get('item', {}).get('renditions', {}).get('nitf', {}).get(
'href')
if nitf_ref:
logger.info('Get AP nitf : {}'.format(nitf_ref))
r = self.get_url(url=nitf_ref, params={'apikey': provider.get('config', {}).get('apikey')},
verify=False, allow_redirects=True)
r.raise_for_status()
root_elt = etree.fromstring(r.content)
nitf_item = nitf.NITFFeedParser().parse(root_elt)
complete_item['nitf'] = nitf_item
else:
if item.get('item', {}).get('type') == 'text':
logger.warning('No NITF for story {}'.format(item.get('item', {}).get('uri')))
parsed_items.append(parser.parse(complete_item, provider))
# Any exception processing an indivisual item is swallowed
except Exception as ex:
logger.exception(ex)
# Save the link for next time
upd_provider = provider.get('config')
upd_provider['next_link'] = nextLink
upd_provider['recoverytime'] = None
update['config'] = upd_provider
return [parsed_items]
register_feeding_service(APMediaFeedingService)
|
# see tastypie documentation at http://django-tastypie.readthedocs.org/en
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.urls import resolve, get_script_prefix
from tastypie import fields
from tastypie.fields import RelatedField
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import Authorization
from tastypie.authorization import DjangoAuthorization
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import Unauthorized, ImmediateHttpResponse, NotFound
from tastypie.http import HttpCreated
from tastypie.resources import ModelResource, Resource
from tastypie.serializers import Serializer
from tastypie.validation import FormValidation, Validation
from django.urls.exceptions import Resolver404
from django.utils import timezone
from dojo.models import Product, Engagement, Test, Finding, \
User, ScanSettings, IPScan, Scan, Stub_Finding, Risk_Acceptance, \
Finding_Template, Test_Type, Development_Environment, \
BurpRawRequestResponse, Endpoint, Notes, JIRA_PKey, JIRA_Conf, \
JIRA_Issue, Tool_Product_Settings, Tool_Configuration, Tool_Type, \
Languages, Language_Type, App_Analysis, Product_Type
from dojo.forms import ProductForm, EngForm, TestForm, \
ScanSettingsForm, FindingForm, StubFindingForm, FindingTemplateForm, \
ImportScanForm, SEVERITY_CHOICES, JIRAForm, JIRA_PKeyForm, EditEndpointForm, \
JIRA_IssueForm, ToolConfigForm, ToolProductSettingsForm, \
ToolTypeForm, LanguagesTypeForm, Languages_TypeTypeForm, App_AnalysisTypeForm, \
Development_EnvironmentForm, Product_TypeForm, Test_TypeForm
from dojo.tools.factory import import_parser_factory
from datetime import datetime
from .object.parser import import_object_eng
"""
Setup logging for the api
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s] %(levelname)s [%(name)s:%(lineno)d] %(message)s',
datefmt='%d/%b/%Y %H:%M:%S',
filename=settings.DOJO_ROOT + '/../dojo.log',
)
logger = logging.getLogger(__name__)
"""
class ModelFormValidation(FormValidation):
"""
Override tastypie's standard ``FormValidation`` since this does not care
about URI to PK conversion for ``ToOneField`` or ``ToManyField``.
"""
resource = ModelResource
def __init__(self, **kwargs):
if 'resource' not in kwargs:
raise ImproperlyConfigured("You must provide a 'resource' to 'ModelFormValidation' classes.")
self.resource = kwargs.pop('resource')
super(ModelFormValidation, self).__init__(**kwargs)
def _get_pk_from_resource_uri(self, resource_field, resource_uri):
""" Return the pk of a resource URI """
base_resource_uri = resource_field.to().get_resource_uri()
if not resource_uri.startswith(base_resource_uri):
raise Exception("Couldn't match resource_uri {0} with {1}".format(resource_uri, base_resource_uri))
before, after = resource_uri.split(base_resource_uri)
return after[:-1] if after.endswith('/') else after
def form_args(self, bundle):
rsc = self.resource()
kwargs = super(ModelFormValidation, self).form_args(bundle)
for name, rel_field in list(rsc.fields.items()):
data = kwargs['data']
if not issubclass(rel_field.__class__, RelatedField):
continue # Not a resource field
if name in data and data[name] is not None:
resource_uri = (data[name] if rel_field.full is False
else data[name]['resource_uri'])
pk = self._get_pk_from_resource_uri(rel_field, resource_uri)
kwargs['data'][name] = pk
return kwargs
class BaseModelResource(ModelResource):
@classmethod
def get_fields(cls, fields=None, excludes=None):
"""
Unfortunately we must override this method because tastypie ignores
'blank' attribute on model fields.
Here we invoke an insane workaround hack due to metaclass inheritance
issues:
http://stackoverflow.com/questions/12757468/invoking-super-in-classmethod-called-from-metaclass-new
"""
this_class = next(
c for c in cls.__mro__
if c.__module__ == __name__ and c.__name__ == 'BaseModelResource')
fields = super(this_class, cls).get_fields(fields=fields,
excludes=excludes)
if not cls._meta.object_class:
return fields
for django_field in cls._meta.object_class._meta.fields:
if django_field.blank is True:
res_field = fields.get(django_field.name, None)
if res_field:
res_field.blank = True
return fields
class MultipartResource(object):
def deserialize(self, request, data, format=None):
if not format:
format = request.Meta.get('CONTENT_TYPE', 'application/json')
if format == 'application/x-www-form-urlencoded':
return request.POST
if format.startswith('multipart'):
data = request.POST.copy()
data.update(request.FILES)
return data
return super(MultipartResource, self).deserialize(request, data, format)
# Authentication class - this only allows for header auth, no url parms allowed
# like parent class.
class DojoApiKeyAuthentication(ApiKeyAuthentication):
def extract_credentials(self, request):
if (request.META.get('HTTP_AUTHORIZATION') and
request.META['HTTP_AUTHORIZATION'].lower().startswith('apikey ')):
(auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()
if auth_type.lower() != 'apikey':
raise ValueError("Incorrect authorization header.")
username, api_key = data.split(':', 1)
else:
raise ValueError("Incorrect authorization header.")
return username, api_key
# Authorization class for Product
class UserProductsOnlyAuthorization(Authorization):
def read_list(self, object_list, bundle):
# This assumes a ``QuerySet`` from ``ModelResource``.
if bundle.request.user.is_staff:
return object_list
return object_list.filter(authorized_users__in=[bundle.request.user])
def read_detail(self, object_list, bundle):
# Is the requested object owned by the user?
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.authorized_users)
def create_list(self, object_list, bundle):
# Assuming they're auto-assigned to ``user``.
return object_list.filter(authorized_users__in=[bundle.request.user])
def create_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.authorized_users)
def update_list(self, object_list, bundle):
allowed = []
# Since they may not all be saved, iterate over them.
for obj in object_list:
if (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.authorized_users):
allowed.append(obj)
return allowed
def update_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.authorized_users)
def delete_list(self, object_list, bundle):
# Sorry user, no deletes for you!
raise Unauthorized("Sorry, no deletes.")
def delete_detail(self, object_list, bundle):
raise Unauthorized("Sorry, no deletes.")
# Authorization class for Scan Settings
class UserScanSettingsAuthorization(Authorization):
def read_list(self, object_list, bundle):
# This assumes a ``QuerySet`` from ``ModelResource``.
if bundle.request.user.is_staff:
return object_list
return object_list.filter(product__authorized_users__in=[
bundle.request.user])
def read_detail(self, object_list, bundle):
# Is the requested object owned by the user?
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.product.authorized_users)
def create_list(self, object_list, bundle):
# Assuming they're auto-assigned to ``user``.
if bundle.request.user.is_staff:
return object_list
else:
return object_list.filter(
product__authorized_users__in=[bundle.request.user])
def create_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.product.authorized_users)
def update_list(self, object_list, bundle):
allowed = []
# Since they may not all be saved, iterate over them.
for obj in object_list:
if (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.product.authorized_users):
allowed.append(obj)
return allowed
def update_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.product.authorized_users)
def delete_list(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.product.authorized_users)
def delete_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.product.authorized_users)
# Authorization class for Scan Settings
class UserScanAuthorization(Authorization):
def read_list(self, object_list, bundle):
# This assumes a ``QuerySet`` from ``ModelResource``.
if bundle.request.user.is_staff:
return object_list
return object_list.filter(
scan_settings__product__authorized_users__in=[
bundle.request.user])
def read_detail(self, object_list, bundle):
# Is the requested object owned by the user?
return (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users)
def create_list(self, object_list, bundle):
# Assuming they're auto-assigned to ``user``.
if bundle.request.user.is_staff:
return object_list
else:
return object_list.filter(
scan_settings__product__authorized_users__in=[
bundle.request.user])
def create_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users)
def update_list(self, object_list, bundle):
allowed = []
# Since they may not all be saved, iterate over them.
for obj in object_list:
if (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users):
allowed.append(obj)
return allowed
def update_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users)
def delete_list(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users)
def delete_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users)
"""
Look up resource only, no update, store, delete
"""
class UserResource(BaseModelResource):
class Meta:
queryset = User.objects.all()
resource_name = 'users'
fields = ['id', 'username', 'first_name', 'last_name', 'last_login']
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
include_resource_uri = True
filtering = {
'id': ALL,
'username': ALL,
'first_name': ALL,
'last_name': ALL
}
authorization = DjangoAuthorization()
authentication = DojoApiKeyAuthentication()
serializer = Serializer(formats=['json'])
"""
/api/v1/product_types/
GET [/id/], DELETE [/id/]
Expects: no params
Returns product_types: ALL
Relevant apply filter ?id=?
POST, PUT [/id/]
Expects *name
"""
class ProductTypeResource(BaseModelResource):
class Meta:
resource_name = 'product_types'
list_allowed_methods = ['get', 'post']
# disabled delete. Should not be allowed without fine authorization.
detail_allowed_methods = ['get', 'post', 'put']
queryset = Product_Type.objects.all().order_by('id')
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=Product_TypeForm, resource=ProductTypeResource)
"""
POST, PUT
Expects *product name, *description, *prod_type [1-7]
"""
class ProductResource(BaseModelResource):
class Meta:
resource_name = 'products'
# disabled delete. Should not be allowed without fine authorization.
list_allowed_methods = ['get', 'post'] # only allow get for lists
detail_allowed_methods = ['get', 'post', 'put']
queryset = Product.objects.all().order_by('name')
ordering = ['name', 'id', 'description', 'findings_count', 'created',
'product_type_id']
excludes = ['tid', 'manager', 'prod_manager', 'tech_contact',
'updated']
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
'prod_type': ALL,
'created': ALL,
'findings_count': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = UserProductsOnlyAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=ProductForm, resource=ProductResource)
def dehydrate(self, bundle):
# Append the tags in a comma delimited list with the tag element
"""
tags = ""
for tag in bundle.obj.tags:
tags = tags + str(tag) + ","
if len(tags) > 0:
tags = tags[:-1]
bundle.data['tags'] = tags
"""
try:
bundle.data['prod_type'] = bundle.obj.prod_type
except:
bundle.data['prod_type'] = 'unknown'
bundle.data['findings_count'] = bundle.obj.findings_count
return bundle
def obj_create(self, bundle, request=None, **kwargs):
bundle = super(ProductResource, self).obj_create(bundle)
"""
tags = bundle.data['tags']
bundle.obj.tags = tags
"""
return bundle
def obj_update(self, bundle, request=None, **kwargs):
bundle = super(ProductResource, self).obj_update(bundle, request, **kwargs)
"""
tags = bundle.data['tags']
bundle.obj.tags = tags
"""
return bundle
"""
/api/v1/tool_configurations/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns Tool_ConfigurationResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class Tool_TypeResource(BaseModelResource):
class Meta:
resource_name = 'tool_types'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Tool_Type.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
'description': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=ToolTypeForm, resource=Tool_TypeResource)
"""
/api/v1/tool_configurations/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns Tool_ConfigurationResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class Tool_ConfigurationResource(BaseModelResource):
tool_type = fields.ForeignKey(Tool_TypeResource, 'tool_type', full=False, null=False)
class Meta:
resource_name = 'tool_configurations'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Tool_Configuration.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
'tool_type': ALL_WITH_RELATIONS,
'tool_project_id': ALL,
'url': ALL,
'authentication_type': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=ToolConfigForm, resource=Tool_ConfigurationResource)
"""
POST, PUT [/id/]
Expects *product *target_start, *target_end, *status[In Progress, On Hold,
Completed], threat_model, pen_test, api_test, check_list
"""
class EngagementResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
lead = fields.ForeignKey(UserResource, 'lead',
full=False, null=True)
source_code_management_server = fields.ForeignKey(Tool_ConfigurationResource, 'source_code_management_server',
full=False, null=True)
build_server = fields.ForeignKey(Tool_ConfigurationResource, 'build_server',
full=False, null=True)
orchestration_engine = fields.ForeignKey(Tool_ConfigurationResource, 'orchestration_engine',
full=False, null=True)
class Meta:
resource_name = 'engagements'
list_allowed_methods = ['get', 'post', 'patch']
# disabled delete for /id/
detail_allowed_methods = ['get', 'post', 'put', 'patch']
queryset = Engagement.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'active': ALL,
'eng_type': ALL,
'target_start': ALL,
'target_end': ALL,
'requester': ALL,
'report_type': ALL,
'updated': ALL,
'threat_model': ALL,
'api_test': ALL,
'pen_test': ALL,
'status': ALL,
'product': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=EngForm, resource=EngagementResource)
def dehydrate(self, bundle):
if bundle.obj.eng_type is not None:
bundle.data['eng_type'] = bundle.obj.eng_type.name
else:
bundle.data['eng_type'] = None
bundle.data['product_id'] = bundle.obj.product.id
bundle.data['report_type'] = bundle.obj.report_type
bundle.data['requester'] = bundle.obj.requester
return bundle
"""
/api/v1/app_analysis/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns Tool_ConfigurationResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class App_AnalysisResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
user = fields.ForeignKey(UserResource, 'user', null=False)
class Meta:
resource_name = 'app_analysis'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = App_Analysis.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'product': ALL_WITH_RELATIONS,
'user': ALL,
'confidence': ALL,
'version': ALL,
'icon': ALL,
'website': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=App_AnalysisTypeForm, resource=App_AnalysisResource)
"""
/api/v1/language_types/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns Tool_ConfigurationResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class LanguageTypeResource(BaseModelResource):
class Meta:
resource_name = 'language_types'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Language_Type.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'language': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=Languages_TypeTypeForm, resource=LanguageTypeResource)
"""
/api/v1/languages/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns Tool_ConfigurationResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DELETE [/id/]
"""
class LanguagesResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
language_type = fields.ForeignKey(LanguageTypeResource, 'language', full=False, null=False)
user = fields.ForeignKey(UserResource, 'user', null=False)
class Meta:
resource_name = 'languages'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Languages.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'files': ALL,
'language_type': ALL_WITH_RELATIONS,
'product': ALL_WITH_RELATIONS,
'user': ALL,
'blank': ALL,
'comment': ALL,
'code': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=LanguagesTypeForm, resource=LanguagesResource)
"""
/api/v1/tool_product_settings/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns ToolProductSettingsResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class ToolProductSettingsResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
tool_configuration = fields.ForeignKey(Tool_ConfigurationResource, 'tool_configuration', full=False, null=False)
class Meta:
resource_name = 'tool_product_settings'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Tool_Product_Settings.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
'product': ALL_WITH_RELATIONS,
'tool_configuration': ALL_WITH_RELATIONS,
'tool_project_id': ALL,
'url': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=ToolProductSettingsForm, resource=ToolProductSettingsResource)
"""
/api/v1/endpoints/
GET [/id/], DELETE [/id/]
Expects: no params or endpoint id
Returns endpoint
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class EndpointResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
class Meta:
resource_name = 'endpoints'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Endpoint.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'host': ALL,
'product': ALL_WITH_RELATIONS,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=EditEndpointForm, resource=EndpointResource)
"""
/api/v1/jira_configurations/
GET [/id/], DELETE [/id/]
Expects: no params or JIRA_PKey
Returns jira configuration: ALL or by JIRA_PKey
POST, PUT [/id/]
"""
class JIRA_IssueResource(BaseModelResource):
class Meta:
resource_name = 'jira_finding_mappings'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = JIRA_Issue.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'jira_id': ALL,
'jira_key': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=JIRA_IssueForm, resource=JIRA_IssueResource)
"""
/api/v1/jira_configurations/
GET [/id/], DELETE [/id/]
Expects: no params or JIRA_PKey
Returns jira configuration: ALL or by JIRA_PKey
POST, PUT [/id/]
"""
class JIRA_ConfResource(BaseModelResource):
class Meta:
resource_name = 'jira_configurations'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = JIRA_Conf.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'url': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=JIRAForm, resource=JIRA_ConfResource)
"""
/api/v1/jira/
GET [/id/], DELETE [/id/]
Expects: no params or jira product key
POST, PUT, DELETE [/id/]
"""
class JiraResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
conf = fields.ForeignKey(JIRA_ConfResource, 'conf',
full=False, null=True)
class Meta:
resource_name = 'jira_product_configurations'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = JIRA_PKey.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'conf': ALL,
'product': ALL_WITH_RELATIONS,
'component': ALL,
'project_key': ALL,
'push_all_issues': ALL,
'enable_engagement_epic_mapping': ALL,
'push_notes': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=JIRA_PKeyForm, resource=JiraResource)
"""
/api/v1/environments/
GET [/id/]
Expects: no params
Returns environments: ALL
Relevant apply filter ?id=?
POST, PUT [/id/]
Expects *name
"""
class DevelopmentEnvironmentResource(BaseModelResource):
class Meta:
resource_name = 'development_environments'
list_allowed_methods = ['get', 'post']
# disabled delete. Should not be allowed without fine authorization.
detail_allowed_methods = ['get', 'post', 'put']
queryset = Development_Environment.objects.all().order_by('id')
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=Development_EnvironmentForm, resource=DevelopmentEnvironmentResource)
"""
/api/v1/test_types/
GET [/id/]
Expects: no params
Returns environments: ALL
Relevant apply filter ?id=?
POST, PUT [/id/]
Expects *name
"""
class TestTypeResource(BaseModelResource):
class Meta:
resource_name = 'test_types'
list_allowed_methods = ['get', 'post']
# disabled delete. Should not be allowed without fine authorization.
detail_allowed_methods = ['get', 'post', 'put']
queryset = Test_Type.objects.all().order_by('id')
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=Test_TypeForm, resource=TestTypeResource)
"""
/api/v1/tests/
GET [/id/], DELETE [/id/]
Expects: no params or engagement_id
Returns test: ALL or by engagement_id
Relevant apply filter ?test_type=?, ?id=?
POST, PUT [/id/]
Expects *test_type, *engagement, *target_start, *target_end,
estimated_time, actual_time, percent_complete, notes
"""
class TestResource(BaseModelResource):
engagement = fields.ForeignKey(EngagementResource, 'engagement',
full=False, null=False)
class Meta:
resource_name = 'tests'
list_allowed_methods = ['get', 'post']
# disabled delete. Should not be allowed without fine authorization.
detail_allowed_methods = ['get', 'post', 'put']
queryset = Test.objects.all().order_by('target_end')
include_resource_uri = True
filtering = {
'id': ALL,
'test_type': ALL,
'target_start': ALL,
'target_end': ALL,
'notes': ALL,
'percent_complete': ALL,
'actual_time': ALL,
'engagement': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=TestForm, resource=TestResource)
def dehydrate(self, bundle):
bundle.data['test_type'] = bundle.obj.test_type
return bundle
class RiskAcceptanceResource(BaseModelResource):
class Meta:
resource_name = 'risk_acceptances'
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
queryset = Risk_Acceptance.objects.all().order_by('created')
"""
/api/v1/findings/
GET [/id/], DELETE [/id/]
Expects: no params or test_id
Returns test: ALL or by test_id
Relevant apply filter ?active=True, ?id=?, ?severity=?
POST, PUT [/id/]
Expects *title, *date, *severity, *description, *mitigation, *impact,
*endpoint, *test, cwe, active, false_p, verified,
mitigated, *reporter
"""
class FindingResource(BaseModelResource):
reporter = fields.ForeignKey(UserResource, 'reporter', null=False)
test = fields.ForeignKey(TestResource, 'test', null=False)
# risk_acceptance = fields.ManyToManyField(RiskAcceptanceResource, 'risk_acceptance', full=True, null=True)
product = fields.ForeignKey(ProductResource, 'test__engagement__product', full=False, null=False)
engagement = fields.ForeignKey(EngagementResource, 'test__engagement', full=False, null=False)
class Meta:
resource_name = 'findings'
queryset = Finding.objects.select_related("test")
# deleting of findings is not allowed via API.
# Admin interface can be used for this.
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put']
include_resource_uri = True
filtering = {
'id': ALL,
'title': ALL,
'date': ALL,
'severity': ALL,
'description': ALL,
'mitigated': ALL,
'endpoint': ALL,
'test': ALL_WITH_RELATIONS,
'active': ALL,
'verified': ALL,
'false_p': ALL,
'reporter': ALL,
'url': ALL,
'out_of_scope': ALL,
'duplicate': ALL,
# 'risk_acceptance': ALL_WITH_RELATIONS,
'engagement': ALL_WITH_RELATIONS,
'product': ALL_WITH_RELATIONS
# 'build_id': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=FindingForm, resource=FindingResource)
def dehydrate(self, bundle):
engagement = Engagement.objects.select_related('product'). \
filter(test__finding__id=bundle.obj.id)
bundle.data['engagement'] = "/api/v1/engagements/%s/" % engagement[0].id
bundle.data['product'] = \
"/api/v1/products/%s/" % engagement[0].product.id
return bundle
"""
/api/v1/finding_templates/
GET [/id/], DELETE [/id/]
Expects: no params or test_id
Returns test: ALL or by test_id
Relevant apply filter ?active=True, ?id=?, ?severity=?
POST, PUT [/id/]
Expects *title, *severity, *description, *mitigation, *impact,
*endpoint, *test, cwe, active, false_p, verified,
mitigated, *reporter
"""
class FindingTemplateResource(BaseModelResource):
class Meta:
resource_name = 'finding_templates'
queryset = Finding_Template.objects.all()
excludes = ['numerical_severity']
# deleting of Finding_Template is not allowed via API.
# Admin interface can be used for this.
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put']
include_resource_uri = True
"""
title = models.TextField(max_length=1000)
cwe = models.IntegerField(default=None, null=True, blank=True)
severity = models.CharField(max_length=200, null=True, blank=True)
description = models.TextField(null=True, blank=True)
mitigation = models.TextField(null=True, blank=True)
impact = models.TextField(null=True, blank=True)
references = models.TextField(null=True, blank=True, db_column="refs")
numerical_severity
"""
filtering = {
'id': ALL,
'title': ALL,
'cwe': ALL,
'severity': ALL,
'description': ALL,
'mitigated': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=FindingTemplateForm, resource=FindingTemplateResource)
class StubFindingResource(BaseModelResource):
reporter = fields.ForeignKey(UserResource, 'reporter', null=False)
test = fields.ForeignKey(TestResource, 'test', null=False)
class Meta:
resource_name = 'stub_findings'
queryset = Stub_Finding.objects.select_related("test")
# deleting of findings is not allowed via UI or API.
# Admin interface can be used for this.
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put']
include_resource_uri = True
filtering = {
'id': ALL,
'title': ALL,
'date': ALL,
'severity': ALL,
'description': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=StubFindingForm, resource=StubFindingResource)
def dehydrate(self, bundle):
engagement = Engagement.objects.select_related('product'). \
filter(test__stub_finding__id=bundle.obj.id)
bundle.data['engagement'] = "/api/v1/engagements/%s/" % engagement[0].id
bundle.data['product'] = \
"/api/v1/products/%s/" % engagement[0].product.id
return bundle
'''
/api/v1/scansettings/
GET [/id/], DELETE [/id/]
Expects: no params or product_id
Returns test: ALL or by product_id
POST, PUT [/id/]
Expects *addresses, *user, *date, *frequency, *email, *product
'''
class ScanSettingsResource(BaseModelResource):
user = fields.ForeignKey(UserResource, 'user', null=False)
product = fields.ForeignKey(ProductResource, 'product', null=False)
class Meta:
resource_name = 'scan_settings'
queryset = ScanSettings.objects.all()
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'put', 'post', 'delete']
include_resource_uri = True
filtering = {
'id': ALL,
'date': ALL,
'user': ALL,
'frequency': ALL,
'product': ALL,
'addresses': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = UserScanSettingsAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=ScanSettingsForm, resource=ScanSettingsResource)
"""
/api/v1/ipscans/
Not exposed via API - but used as part of
ScanResource return values
"""
class IPScanResource(BaseModelResource):
class Meta:
resource_name = 'ipscans'
queryset = IPScan.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
include_resource_uri = True
filtering = {
'id': ALL,
'address': ALL,
'services': ALL,
'scan': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
"""
/api/v1/scans/
GET [/id/], DELETE [/id/]
Expects: no params
Returns scans: ALL
Relevant filters: ?scan_setting=?
"""
class ScanResource(BaseModelResource):
scan_settings = fields.ForeignKey(ScanSettingsResource,
'scan_settings',
null=False)
ipscans = fields.ToManyField(
IPScanResource,
attribute=lambda bundle: IPScan.objects.filter(
scan__id=bundle.obj.id) if IPScan.objects.filter(
scan__id=bundle.obj.id) != [] else [], full=True, null=True)
class Meta:
resource_name = 'scans'
queryset = Scan.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
include_resource_uri = True
filtering = {
'id': ALL,
'date': ALL,
'scan_settings': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = UserScanAuthorization()
serializer = Serializer(formats=['json'])
# Method used to get Private Key from uri, Used in the ImportScan and ReImportScan resources
def get_pk_from_uri(uri):
prefix = get_script_prefix()
chomped_uri = uri
if prefix and chomped_uri.startswith(prefix):
chomped_uri = chomped_uri[len(prefix) - 1:]
try:
view, args, kwargs = resolve(chomped_uri.replace('//', '/'))
except Resolver404:
raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
return kwargs['pk']
"""
/api/v1/importscan/
POST
Expects file, scan_date, scan_type, tags, active, engagement
"""
# Create an Object that will store all the information sent to the endpoint
class ImportScanObject(object):
def __init__(self, initial=None):
self.__dict__['_data'] = {}
if initial:
self.update(initial)
def __getattr__(self, name):
return self._data.get(name, None)
def __setattr__(self, name, value):
self.__dict__['_data'][name] = value
def update(self, other):
for k in other:
self.__setattr__(k, other[k])
def to_dict(self):
return self._data
# The default form validation was buggy so I implemented a custom validation class
class ImportScanValidation(Validation):
def is_valid(self, bundle, request=None):
if not bundle.data:
return {'__all__': 'You didn\'t seem to pass anything in.'}
errors = {}
# Make sure file is present
if 'file' not in bundle.data:
errors.setdefault('file', []).append('You must pass a file in to be imported')
# Make sure scan_date matches required format
if 'scan_date' in bundle.data:
try:
datetime.strptime(bundle.data['scan_date'], '%Y-%m-%d')
except ValueError:
errors.setdefault('scan_date', []).append("Incorrect scan_date format, should be YYYY-MM-DD")
# Make sure scan_type and minimum_severity have valid options
if 'engagement' not in bundle.data:
errors.setdefault('engagement', []).append('engagement must be given')
else:
# verify the engagement is valid
try:
get_pk_from_uri(uri=bundle.data['engagement'])
except NotFound:
errors.setdefault('engagement', []).append('A valid engagement must be supplied. Ex. /api/v1/engagements/1/')
scan_type_list = list([x[0] for x in ImportScanForm.SCAN_TYPE_CHOICES])
if 'scan_type' in bundle.data:
if bundle.data['scan_type'] not in scan_type_list:
errors.setdefault('scan_type', []).append('scan_type must be one of the following: ' + ', '.join(scan_type_list))
else:
errors.setdefault('scan_type', []).append('A scan_type must be given so we know how to import the scan file.')
try:
if 'test_type' in bundle.data:
Test_Type.objects.get(name=bundle.data.get('test_type'))
else:
bundle.data['test_type'] = bundle.data.get('scan_type')
except Test_Type.DoesNotExist:
errors.setdefault('test_type', []).append(
'test_type must be one of the following: ' +
', '.join(Test_Type.objects.values_list("name", flat=True)))
severity_list = list([x[0] for x in SEVERITY_CHOICES])
if 'minimum_severity' in bundle.data:
if bundle.data['minimum_severity'] not in severity_list:
errors.setdefault('minimum_severity', []).append('minimum_severity must be one of the following: ' + ', '.join(severity_list))
# Make sure active and verified are booleans
if 'active' in bundle.data:
if bundle.data['active'] in ['false', 'False', '0']:
bundle.data['active'] = False
elif bundle.data['active'] in ['true', 'True', '1']:
bundle.data['active'] = True
if not isinstance(bundle.data['active'], bool):
errors.setdefault('active', []).append('active must be a boolean')
if 'verified' in bundle.data:
if bundle.data['verified'] in ['false', 'False', '0']:
bundle.data['verified'] = False
elif bundle.data['verified'] in ['true', 'True', '1']:
bundle.data['verified'] = True
if not isinstance(bundle.data['verified'], bool):
errors.setdefault('verified', []).append('verified must be a boolean')
return errors
class BuildDetails(MultipartResource, Resource):
file = fields.FileField(attribute='file')
engagement = fields.CharField(attribute='engagement')
class Meta:
resource_name = 'build_details'
fields = ['engagement', 'file']
list_allowed_methods = ['post']
detail_allowed_methods = []
include_resource_uri = True
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
object_class = ImportScanObject
def hydrate(self, bundle):
bundle.obj.__setattr__('engagement_obj',
Engagement.objects.get(id=get_pk_from_uri(bundle.data['engagement'])))
return bundle
def obj_create(self, bundle, **kwargs):
bundle.obj = ImportScanObject(initial=kwargs)
self.is_valid(bundle)
if bundle.errors:
raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
bundle = self.full_hydrate(bundle)
import_object_eng(bundle.request, bundle.obj.__getattr__('engagement_obj'), bundle.data['file'])
class ImportScanResource(MultipartResource, Resource):
scan_date = fields.DateTimeField(attribute='scan_date')
minimum_severity = fields.CharField(attribute='minimum_severity')
active = fields.BooleanField(attribute='active')
verified = fields.BooleanField(attribute='verified')
scan_type = fields.CharField(attribute='scan_type')
test_type = fields.CharField(attribute='test_type')
tags = fields.CharField(attribute='tags')
file = fields.FileField(attribute='file')
engagement = fields.CharField(attribute='engagement')
lead = fields.CharField(attribute='lead')
class Meta:
resource_name = 'importscan'
fields = ['scan_date', 'minimum_severity', 'active', 'verified', 'scan_type', 'tags', 'file', 'lead']
list_allowed_methods = ['post']
detail_allowed_methods = []
include_resource_uri = True
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
validation = ImportScanValidation()
object_class = ImportScanObject
def hydrate(self, bundle):
if 'scan_date' not in bundle.data:
bundle.data['scan_date'] = datetime.now().strftime("%Y/%m/%d")
if 'minimum_severity' not in bundle.data:
bundle.data['minimum_severity'] = "Info"
if 'active' not in bundle.data:
bundle.data['active'] = True
if 'verified' not in bundle.data:
bundle.data['verified'] = True
if 'tags' not in bundle.data:
bundle.data['tags'] = ""
if 'lead' in bundle.data:
bundle.obj.__setattr__('user_obj',
User.objects.get(id=get_pk_from_uri(bundle.data['lead'])))
bundle.obj.__setattr__('engagement_obj',
Engagement.objects.get(id=get_pk_from_uri(bundle.data['engagement'])))
return bundle
def detail_uri_kwargs(self, bundle_or_obj):
kwargs = {}
return kwargs
def obj_create(self, bundle, **kwargs):
bundle.obj = ImportScanObject(initial=kwargs)
self.is_valid(bundle)
if bundle.errors:
raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
bundle = self.full_hydrate(bundle)
# We now have all the options we need and will just replicate the process in views.py
tt, t_created = Test_Type.objects.get_or_create(name=bundle.data.get('test_type', bundle.data['scan_type']))
# will save in development environment
environment, env_created = Development_Environment.objects.get_or_create(name="Development")
scan_date = datetime.strptime(bundle.data['scan_date'], '%Y-%m-%d')
t = Test(engagement=bundle.obj.__getattr__('engagement_obj'), lead=bundle.obj.__getattr__('user_obj'), test_type=tt, target_start=scan_date,
target_end=scan_date, environment=environment, percent_complete=100)
try:
t.full_clean()
except ValidationError as e:
print("Error Validating Test Object")
print(e)
t.save()
t.tags = bundle.data['tags']
try:
parser = import_parser_factory(bundle.data['file'], t, bundle.data['scan_type'])
except ValueError:
raise NotFound("Parser ValueError")
try:
for item in parser.items:
sev = item.severity
if sev == 'Information' or sev == 'Informational':
sev = 'Info'
item.severity = sev
if Finding.SEVERITIES[sev] > Finding.SEVERITIES[bundle.data['minimum_severity']]:
continue
item.test = t
item.date = t.target_start
item.reporter = bundle.request.user
item.last_reviewed = timezone.now()
item.last_reviewed_by = bundle.request.user
item.active = bundle.data['active']
item.verified = bundle.data['verified']
item.save(dedupe_option=False)
if hasattr(item, 'unsaved_req_resp') and len(item.unsaved_req_resp) > 0:
for req_resp in item.unsaved_req_resp:
burp_rr = BurpRawRequestResponse(finding=item,
burpRequestBase64=req_resp["req"],
burpResponseBase64=req_resp["resp"],
)
burp_rr.clean()
burp_rr.save()
if item.unsaved_request is not None and item.unsaved_response is not None:
burp_rr = BurpRawRequestResponse(finding=item,
burpRequestBase64=item.unsaved_request,
burpResponseBase64=item.unsaved_response,
)
burp_rr.clean()
burp_rr.save()
for endpoint in item.unsaved_endpoints:
ep, created = Endpoint.objects.get_or_create(protocol=endpoint.protocol,
host=endpoint.host,
path=endpoint.path,
query=endpoint.query,
fragment=endpoint.fragment,
product=t.engagement.product)
item.endpoints.add(ep)
item.save()
if item.unsaved_tags is not None:
item.tags = item.unsaved_tags
except SyntaxError:
raise NotFound("Parser SyntaxError")
# Everything executed fine. We successfully imported the scan.
res = TestResource()
uri = res.get_resource_uri(t)
raise ImmediateHttpResponse(HttpCreated(location=uri))
# The default form validation was buggy so I implemented a custom validation class
class ReImportScanValidation(Validation):
def is_valid(self, bundle, request=None):
if not bundle.data:
return {'__all__': 'You didn\'t seem to pass anything in.'}
errors = {}
# Make sure file is present
if 'file' not in bundle.data:
errors.setdefault('file', []).append('You must pass a file in to be imported')
# Make sure scan_date matches required format
if 'scan_date' in bundle.data:
try:
datetime.strptime(bundle.data['scan_date'], '%Y/%m/%d')
except ValueError:
errors.setdefault('scan_date', []).append("Incorrect scan_date format, should be YYYY/MM/DD")
# Make sure scan_type and minimum_severity have valid options
if 'test' not in bundle.data:
errors.setdefault('test', []).append('test must be given')
else:
# verify the test is valid
try:
get_pk_from_uri(uri=bundle.data['test'])
except NotFound:
errors.setdefault('test', []).append('A valid test must be supplied. Ex. /api/v1/tests/1/')
scan_type_list = list([x[0] for x in ImportScanForm.SCAN_TYPE_CHOICES])
if 'scan_type' in bundle.data:
if bundle.data['scan_type'] not in scan_type_list:
errors.setdefault('scan_type', []).append('scan_type must be one of the following: ' + ', '.join(scan_type_list))
else:
errors.setdefault('scan_type', []).append('A scan_type must be given so we know how to import the scan file.')
severity_list = list([x[0] for x in SEVERITY_CHOICES])
if 'minimum_severity' in bundle.data:
if bundle.data['minimum_severity'] not in severity_list:
errors.setdefault('minimum_severity', []).append('minimum_severity must be one of the following: ' + ', '.join(severity_list))
# Make sure active and verified are booleans
if 'active' in bundle.data:
if bundle.data['active'] in ['false', 'False', '0']:
bundle.data['active'] = False
elif bundle.data['active'] in ['true', 'True', '1']:
bundle.data['active'] = True
if not isinstance(bundle.data['active'], bool):
errors.setdefault('active', []).append('active must be a boolean')
if 'verified' in bundle.data:
if bundle.data['verified'] in ['false', 'False', '0']:
bundle.data['verified'] = False
elif bundle.data['verified'] in ['true', 'True', '1']:
bundle.data['verified'] = True
if not isinstance(bundle.data['verified'], bool):
errors.setdefault('verified', []).append('verified must be a boolean')
return errors
class ReImportScanResource(MultipartResource, Resource):
scan_date = fields.DateTimeField(attribute='scan_date')
minimum_severity = fields.CharField(attribute='minimum_severity')
active = fields.BooleanField(attribute='active')
verified = fields.BooleanField(attribute='verified')
scan_type = fields.CharField(attribute='scan_type')
tags = fields.CharField(attribute='tags')
file = fields.FileField(attribute='file')
test = fields.CharField(attribute='test')
class Meta:
resource_name = 'reimportscan'
fields = ['scan_date', 'minimum_severity', 'active', 'verified', 'scan_type', 'tags', 'file']
list_allowed_methods = ['post']
detail_allowed_methods = []
include_resource_uri = True
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
validation = ReImportScanValidation()
object_class = ImportScanObject
def hydrate(self, bundle):
if 'scan_date' not in bundle.data:
bundle.data['scan_date'] = datetime.now().strftime("%Y/%m/%d")
if 'minimum_severity' not in bundle.data:
bundle.data['minimum_severity'] = "Info"
if 'active' not in bundle.data:
bundle.data['active'] = True
if 'verified' not in bundle.data:
bundle.data['verified'] = True
if 'tags' not in bundle.data:
bundle.data['tags'] = ""
bundle.obj.__setattr__('test_obj',
Test.objects.get(id=get_pk_from_uri(bundle.data['test'])))
return bundle
def detail_uri_kwargs(self, bundle_or_obj):
kwargs = {}
return kwargs
def obj_create(self, bundle, **kwargs):
bundle.obj = ImportScanObject(initial=kwargs)
self.is_valid(bundle)
if bundle.errors:
raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
bundle = self.full_hydrate(bundle)
test = bundle.obj.__getattr__('test_obj')
scan_type = bundle.obj.__getattr__('scan_type')
min_sev = bundle.obj.__getattr__('minimum_severity')
scan_date = bundle.obj.__getattr__('scan_date')
verified = bundle.obj.__getattr__('verified')
active = bundle.obj.__getattr__('active')
try:
parser = import_parser_factory(bundle.data['file'], test, scan_type)
except ValueError:
raise NotFound("Parser ValueError")
try:
items = parser.items
original_items = test.finding_set.all().values_list("id", flat=True)
new_items = []
mitigated_count = 0
finding_count = 0
finding_added_count = 0
reactivated_count = 0
for item in items:
sev = item.severity
if sev == 'Information' or sev == 'Informational':
sev = 'Info'
if Finding.SEVERITIES[sev] > Finding.SEVERITIES[min_sev]:
continue
if scan_type == 'Veracode Scan' or scan_type == 'Arachni Scan':
find = Finding.objects.filter(title=item.title,
test__id=test.id,
severity=sev,
numerical_severity=Finding.get_numerical_severity(sev),
description=item.description
)
else:
find = Finding.objects.filter(title=item.title,
test__id=test.id,
severity=sev,
numerical_severity=Finding.get_numerical_severity(sev),
)
if len(find) == 1:
find = find[0]
if find.mitigated:
# it was once fixed, but now back
find.mitigated = None
find.mitigated_by = None
find.active = True
find.verified = verified
find.save()
note = Notes(entry="Re-activated by %s re-upload." % scan_type,
author=bundle.request.user)
note.save()
find.notes.add(note)
reactivated_count += 1
new_items.append(find.id)
else:
item.test = test
item.date = test.target_start
item.reporter = bundle.request.user
item.last_reviewed = timezone.now()
item.last_reviewed_by = bundle.request.user
item.verified = verified
item.active = active
item.save()
finding_added_count += 1
new_items.append(item.id)
find = item
if hasattr(item, 'unsaved_req_resp') and len(item.unsaved_req_resp) > 0:
for req_resp in item.unsaved_req_resp:
burp_rr = BurpRawRequestResponse(finding=find,
burpRequestBase64=req_resp["req"],
burpResponseBase64=req_resp["resp"],
)
burp_rr.clean()
burp_rr.save()
if item.unsaved_request is not None and item.unsaved_response is not None:
burp_rr = BurpRawRequestResponse(finding=find,
burpRequestBase64=item.unsaved_request,
burpResponseBase64=item.unsaved_response,
)
burp_rr.clean()
burp_rr.save()
if find:
finding_count += 1
for endpoint in item.unsaved_endpoints:
ep, created = Endpoint.objects.get_or_create(protocol=endpoint.protocol,
host=endpoint.host,
path=endpoint.path,
query=endpoint.query,
fragment=endpoint.fragment,
product=test.engagement.product)
find.endpoints.add(ep)
if item.unsaved_tags is not None:
find.tags = item.unsaved_tags
# calculate the difference
to_mitigate = set(original_items) - set(new_items)
for finding_id in to_mitigate:
finding = Finding.objects.get(id=finding_id)
finding.mitigated = datetime.combine(scan_date, timezone.now().time())
finding.mitigated_by = bundle.request.user
finding.active = False
finding.save()
note = Notes(entry="Mitigated by %s re-upload." % scan_type,
author=bundle.request.user)
note.save()
finding.notes.add(note)
mitigated_count += 1
except SyntaxError:
raise NotFound("Parser SyntaxError")
# Everything executed fine. We successfully imported the scan.
raise ImmediateHttpResponse(HttpCreated(location=bundle.obj.__getattr__('test')))
API ImportScan Fix
# see tastypie documentation at http://django-tastypie.readthedocs.org/en
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.urls import resolve, get_script_prefix
from tastypie import fields
from tastypie.fields import RelatedField
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import Authorization
from tastypie.authorization import DjangoAuthorization
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import Unauthorized, ImmediateHttpResponse, NotFound
from tastypie.http import HttpCreated
from tastypie.resources import ModelResource, Resource
from tastypie.serializers import Serializer
from tastypie.validation import FormValidation, Validation
from django.urls.exceptions import Resolver404
from django.utils import timezone
from dojo.models import Product, Engagement, Test, Finding, \
User, ScanSettings, IPScan, Scan, Stub_Finding, Risk_Acceptance, \
Finding_Template, Test_Type, Development_Environment, \
BurpRawRequestResponse, Endpoint, Notes, JIRA_PKey, JIRA_Conf, \
JIRA_Issue, Tool_Product_Settings, Tool_Configuration, Tool_Type, \
Languages, Language_Type, App_Analysis, Product_Type
from dojo.forms import ProductForm, EngForm, TestForm, \
ScanSettingsForm, FindingForm, StubFindingForm, FindingTemplateForm, \
ImportScanForm, SEVERITY_CHOICES, JIRAForm, JIRA_PKeyForm, EditEndpointForm, \
JIRA_IssueForm, ToolConfigForm, ToolProductSettingsForm, \
ToolTypeForm, LanguagesTypeForm, Languages_TypeTypeForm, App_AnalysisTypeForm, \
Development_EnvironmentForm, Product_TypeForm, Test_TypeForm
from dojo.tools.factory import import_parser_factory
from datetime import datetime
from .object.parser import import_object_eng
"""
Setup logging for the api
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s] %(levelname)s [%(name)s:%(lineno)d] %(message)s',
datefmt='%d/%b/%Y %H:%M:%S',
filename=settings.DOJO_ROOT + '/../dojo.log',
)
logger = logging.getLogger(__name__)
"""
class ModelFormValidation(FormValidation):
"""
Override tastypie's standard ``FormValidation`` since this does not care
about URI to PK conversion for ``ToOneField`` or ``ToManyField``.
"""
resource = ModelResource
def __init__(self, **kwargs):
if 'resource' not in kwargs:
raise ImproperlyConfigured("You must provide a 'resource' to 'ModelFormValidation' classes.")
self.resource = kwargs.pop('resource')
super(ModelFormValidation, self).__init__(**kwargs)
def _get_pk_from_resource_uri(self, resource_field, resource_uri):
""" Return the pk of a resource URI """
base_resource_uri = resource_field.to().get_resource_uri()
if not resource_uri.startswith(base_resource_uri):
raise Exception("Couldn't match resource_uri {0} with {1}".format(resource_uri, base_resource_uri))
before, after = resource_uri.split(base_resource_uri)
return after[:-1] if after.endswith('/') else after
def form_args(self, bundle):
rsc = self.resource()
kwargs = super(ModelFormValidation, self).form_args(bundle)
for name, rel_field in list(rsc.fields.items()):
data = kwargs['data']
if not issubclass(rel_field.__class__, RelatedField):
continue # Not a resource field
if name in data and data[name] is not None:
resource_uri = (data[name] if rel_field.full is False
else data[name]['resource_uri'])
pk = self._get_pk_from_resource_uri(rel_field, resource_uri)
kwargs['data'][name] = pk
return kwargs
class BaseModelResource(ModelResource):
@classmethod
def get_fields(cls, fields=None, excludes=None):
"""
Unfortunately we must override this method because tastypie ignores
'blank' attribute on model fields.
Here we invoke an insane workaround hack due to metaclass inheritance
issues:
http://stackoverflow.com/questions/12757468/invoking-super-in-classmethod-called-from-metaclass-new
"""
this_class = next(
c for c in cls.__mro__
if c.__module__ == __name__ and c.__name__ == 'BaseModelResource')
fields = super(this_class, cls).get_fields(fields=fields,
excludes=excludes)
if not cls._meta.object_class:
return fields
for django_field in cls._meta.object_class._meta.fields:
if django_field.blank is True:
res_field = fields.get(django_field.name, None)
if res_field:
res_field.blank = True
return fields
class MultipartResource(object):
def deserialize(self, request, data, format=None):
if not format:
format = request.Meta.get('CONTENT_TYPE', 'application/json')
if format == 'application/x-www-form-urlencoded':
return request.POST
if format.startswith('multipart'):
data = request.POST.copy()
data.update(request.FILES)
return data
return super(MultipartResource, self).deserialize(request, data, format)
# Authentication class - this only allows for header auth, no url parms allowed
# like parent class.
class DojoApiKeyAuthentication(ApiKeyAuthentication):
def extract_credentials(self, request):
if (request.META.get('HTTP_AUTHORIZATION') and
request.META['HTTP_AUTHORIZATION'].lower().startswith('apikey ')):
(auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()
if auth_type.lower() != 'apikey':
raise ValueError("Incorrect authorization header.")
username, api_key = data.split(':', 1)
else:
raise ValueError("Incorrect authorization header.")
return username, api_key
# Authorization class for Product
class UserProductsOnlyAuthorization(Authorization):
def read_list(self, object_list, bundle):
# This assumes a ``QuerySet`` from ``ModelResource``.
if bundle.request.user.is_staff:
return object_list
return object_list.filter(authorized_users__in=[bundle.request.user])
def read_detail(self, object_list, bundle):
# Is the requested object owned by the user?
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.authorized_users)
def create_list(self, object_list, bundle):
# Assuming they're auto-assigned to ``user``.
return object_list.filter(authorized_users__in=[bundle.request.user])
def create_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.authorized_users)
def update_list(self, object_list, bundle):
allowed = []
# Since they may not all be saved, iterate over them.
for obj in object_list:
if (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.authorized_users):
allowed.append(obj)
return allowed
def update_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.authorized_users)
def delete_list(self, object_list, bundle):
# Sorry user, no deletes for you!
raise Unauthorized("Sorry, no deletes.")
def delete_detail(self, object_list, bundle):
raise Unauthorized("Sorry, no deletes.")
# Authorization class for Scan Settings
class UserScanSettingsAuthorization(Authorization):
def read_list(self, object_list, bundle):
# This assumes a ``QuerySet`` from ``ModelResource``.
if bundle.request.user.is_staff:
return object_list
return object_list.filter(product__authorized_users__in=[
bundle.request.user])
def read_detail(self, object_list, bundle):
# Is the requested object owned by the user?
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.product.authorized_users)
def create_list(self, object_list, bundle):
# Assuming they're auto-assigned to ``user``.
if bundle.request.user.is_staff:
return object_list
else:
return object_list.filter(
product__authorized_users__in=[bundle.request.user])
def create_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.product.authorized_users)
def update_list(self, object_list, bundle):
allowed = []
# Since they may not all be saved, iterate over them.
for obj in object_list:
if (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.product.authorized_users):
allowed.append(obj)
return allowed
def update_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.product.authorized_users)
def delete_list(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.product.authorized_users)
def delete_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in bundle.obj.product.authorized_users)
# Authorization class for Scan Settings
class UserScanAuthorization(Authorization):
def read_list(self, object_list, bundle):
# This assumes a ``QuerySet`` from ``ModelResource``.
if bundle.request.user.is_staff:
return object_list
return object_list.filter(
scan_settings__product__authorized_users__in=[
bundle.request.user])
def read_detail(self, object_list, bundle):
# Is the requested object owned by the user?
return (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users)
def create_list(self, object_list, bundle):
# Assuming they're auto-assigned to ``user``.
if bundle.request.user.is_staff:
return object_list
else:
return object_list.filter(
scan_settings__product__authorized_users__in=[
bundle.request.user])
def create_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users)
def update_list(self, object_list, bundle):
allowed = []
# Since they may not all be saved, iterate over them.
for obj in object_list:
if (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users):
allowed.append(obj)
return allowed
def update_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users)
def delete_list(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users)
def delete_detail(self, object_list, bundle):
return (bundle.request.user.is_staff or
bundle.request.user in
bundle.obj.scan_settings.product.authorized_users)
"""
Look up resource only, no update, store, delete
"""
class UserResource(BaseModelResource):
class Meta:
queryset = User.objects.all()
resource_name = 'users'
fields = ['id', 'username', 'first_name', 'last_name', 'last_login']
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
include_resource_uri = True
filtering = {
'id': ALL,
'username': ALL,
'first_name': ALL,
'last_name': ALL
}
authorization = DjangoAuthorization()
authentication = DojoApiKeyAuthentication()
serializer = Serializer(formats=['json'])
"""
/api/v1/product_types/
GET [/id/], DELETE [/id/]
Expects: no params
Returns product_types: ALL
Relevant apply filter ?id=?
POST, PUT [/id/]
Expects *name
"""
class ProductTypeResource(BaseModelResource):
class Meta:
resource_name = 'product_types'
list_allowed_methods = ['get', 'post']
# disabled delete. Should not be allowed without fine authorization.
detail_allowed_methods = ['get', 'post', 'put']
queryset = Product_Type.objects.all().order_by('id')
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=Product_TypeForm, resource=ProductTypeResource)
"""
POST, PUT
Expects *product name, *description, *prod_type [1-7]
"""
class ProductResource(BaseModelResource):
class Meta:
resource_name = 'products'
# disabled delete. Should not be allowed without fine authorization.
list_allowed_methods = ['get', 'post'] # only allow get for lists
detail_allowed_methods = ['get', 'post', 'put']
queryset = Product.objects.all().order_by('name')
ordering = ['name', 'id', 'description', 'findings_count', 'created',
'product_type_id']
excludes = ['tid', 'manager', 'prod_manager', 'tech_contact',
'updated']
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
'prod_type': ALL,
'created': ALL,
'findings_count': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = UserProductsOnlyAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=ProductForm, resource=ProductResource)
def dehydrate(self, bundle):
# Append the tags in a comma delimited list with the tag element
"""
tags = ""
for tag in bundle.obj.tags:
tags = tags + str(tag) + ","
if len(tags) > 0:
tags = tags[:-1]
bundle.data['tags'] = tags
"""
try:
bundle.data['prod_type'] = bundle.obj.prod_type
except:
bundle.data['prod_type'] = 'unknown'
bundle.data['findings_count'] = bundle.obj.findings_count
return bundle
def obj_create(self, bundle, request=None, **kwargs):
bundle = super(ProductResource, self).obj_create(bundle)
"""
tags = bundle.data['tags']
bundle.obj.tags = tags
"""
return bundle
def obj_update(self, bundle, request=None, **kwargs):
bundle = super(ProductResource, self).obj_update(bundle, request, **kwargs)
"""
tags = bundle.data['tags']
bundle.obj.tags = tags
"""
return bundle
"""
/api/v1/tool_configurations/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns Tool_ConfigurationResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class Tool_TypeResource(BaseModelResource):
class Meta:
resource_name = 'tool_types'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Tool_Type.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
'description': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=ToolTypeForm, resource=Tool_TypeResource)
"""
/api/v1/tool_configurations/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns Tool_ConfigurationResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class Tool_ConfigurationResource(BaseModelResource):
tool_type = fields.ForeignKey(Tool_TypeResource, 'tool_type', full=False, null=False)
class Meta:
resource_name = 'tool_configurations'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Tool_Configuration.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
'tool_type': ALL_WITH_RELATIONS,
'tool_project_id': ALL,
'url': ALL,
'authentication_type': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=ToolConfigForm, resource=Tool_ConfigurationResource)
"""
POST, PUT [/id/]
Expects *product *target_start, *target_end, *status[In Progress, On Hold,
Completed], threat_model, pen_test, api_test, check_list
"""
class EngagementResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
lead = fields.ForeignKey(UserResource, 'lead',
full=False, null=True)
source_code_management_server = fields.ForeignKey(Tool_ConfigurationResource, 'source_code_management_server',
full=False, null=True)
build_server = fields.ForeignKey(Tool_ConfigurationResource, 'build_server',
full=False, null=True)
orchestration_engine = fields.ForeignKey(Tool_ConfigurationResource, 'orchestration_engine',
full=False, null=True)
class Meta:
resource_name = 'engagements'
list_allowed_methods = ['get', 'post', 'patch']
# disabled delete for /id/
detail_allowed_methods = ['get', 'post', 'put', 'patch']
queryset = Engagement.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'active': ALL,
'eng_type': ALL,
'target_start': ALL,
'target_end': ALL,
'requester': ALL,
'report_type': ALL,
'updated': ALL,
'threat_model': ALL,
'api_test': ALL,
'pen_test': ALL,
'status': ALL,
'product': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=EngForm, resource=EngagementResource)
def dehydrate(self, bundle):
if bundle.obj.eng_type is not None:
bundle.data['eng_type'] = bundle.obj.eng_type.name
else:
bundle.data['eng_type'] = None
bundle.data['product_id'] = bundle.obj.product.id
bundle.data['report_type'] = bundle.obj.report_type
bundle.data['requester'] = bundle.obj.requester
return bundle
"""
/api/v1/app_analysis/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns Tool_ConfigurationResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class App_AnalysisResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
user = fields.ForeignKey(UserResource, 'user', null=False)
class Meta:
resource_name = 'app_analysis'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = App_Analysis.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'product': ALL_WITH_RELATIONS,
'user': ALL,
'confidence': ALL,
'version': ALL,
'icon': ALL,
'website': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=App_AnalysisTypeForm, resource=App_AnalysisResource)
"""
/api/v1/language_types/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns Tool_ConfigurationResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class LanguageTypeResource(BaseModelResource):
class Meta:
resource_name = 'language_types'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Language_Type.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'language': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=Languages_TypeTypeForm, resource=LanguageTypeResource)
"""
/api/v1/languages/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns Tool_ConfigurationResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DELETE [/id/]
"""
class LanguagesResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
language_type = fields.ForeignKey(LanguageTypeResource, 'language', full=False, null=False)
user = fields.ForeignKey(UserResource, 'user', null=False)
class Meta:
resource_name = 'languages'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Languages.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'files': ALL,
'language_type': ALL_WITH_RELATIONS,
'product': ALL_WITH_RELATIONS,
'user': ALL,
'blank': ALL,
'comment': ALL,
'code': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=LanguagesTypeForm, resource=LanguagesResource)
"""
/api/v1/tool_product_settings/
GET [/id/], DELETE [/id/]
Expects: no params or id
Returns ToolProductSettingsResource
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class ToolProductSettingsResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
tool_configuration = fields.ForeignKey(Tool_ConfigurationResource, 'tool_configuration', full=False, null=False)
class Meta:
resource_name = 'tool_product_settings'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Tool_Product_Settings.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
'product': ALL_WITH_RELATIONS,
'tool_configuration': ALL_WITH_RELATIONS,
'tool_project_id': ALL,
'url': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=ToolProductSettingsForm, resource=ToolProductSettingsResource)
"""
/api/v1/endpoints/
GET [/id/], DELETE [/id/]
Expects: no params or endpoint id
Returns endpoint
Relevant apply filter ?test_type=?, ?id=?
POST, PUT, DLETE [/id/]
"""
class EndpointResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
class Meta:
resource_name = 'endpoints'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = Endpoint.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'host': ALL,
'product': ALL_WITH_RELATIONS,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=EditEndpointForm, resource=EndpointResource)
"""
/api/v1/jira_configurations/
GET [/id/], DELETE [/id/]
Expects: no params or JIRA_PKey
Returns jira configuration: ALL or by JIRA_PKey
POST, PUT [/id/]
"""
class JIRA_IssueResource(BaseModelResource):
class Meta:
resource_name = 'jira_finding_mappings'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = JIRA_Issue.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'jira_id': ALL,
'jira_key': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=JIRA_IssueForm, resource=JIRA_IssueResource)
"""
/api/v1/jira_configurations/
GET [/id/], DELETE [/id/]
Expects: no params or JIRA_PKey
Returns jira configuration: ALL or by JIRA_PKey
POST, PUT [/id/]
"""
class JIRA_ConfResource(BaseModelResource):
class Meta:
resource_name = 'jira_configurations'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = JIRA_Conf.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'url': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=JIRAForm, resource=JIRA_ConfResource)
"""
/api/v1/jira/
GET [/id/], DELETE [/id/]
Expects: no params or jira product key
POST, PUT, DELETE [/id/]
"""
class JiraResource(BaseModelResource):
product = fields.ForeignKey(ProductResource, 'product',
full=False, null=False)
conf = fields.ForeignKey(JIRA_ConfResource, 'conf',
full=False, null=True)
class Meta:
resource_name = 'jira_product_configurations'
list_allowed_methods = ['get', 'post', 'put', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
queryset = JIRA_PKey.objects.all()
include_resource_uri = True
filtering = {
'id': ALL,
'conf': ALL,
'product': ALL_WITH_RELATIONS,
'component': ALL,
'project_key': ALL,
'push_all_issues': ALL,
'enable_engagement_epic_mapping': ALL,
'push_notes': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=JIRA_PKeyForm, resource=JiraResource)
"""
/api/v1/environments/
GET [/id/]
Expects: no params
Returns environments: ALL
Relevant apply filter ?id=?
POST, PUT [/id/]
Expects *name
"""
class DevelopmentEnvironmentResource(BaseModelResource):
class Meta:
resource_name = 'development_environments'
list_allowed_methods = ['get', 'post']
# disabled delete. Should not be allowed without fine authorization.
detail_allowed_methods = ['get', 'post', 'put']
queryset = Development_Environment.objects.all().order_by('id')
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=Development_EnvironmentForm, resource=DevelopmentEnvironmentResource)
"""
/api/v1/test_types/
GET [/id/]
Expects: no params
Returns environments: ALL
Relevant apply filter ?id=?
POST, PUT [/id/]
Expects *name
"""
class TestTypeResource(BaseModelResource):
class Meta:
resource_name = 'test_types'
list_allowed_methods = ['get', 'post']
# disabled delete. Should not be allowed without fine authorization.
detail_allowed_methods = ['get', 'post', 'put']
queryset = Test_Type.objects.all().order_by('id')
include_resource_uri = True
filtering = {
'id': ALL,
'name': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=Test_TypeForm, resource=TestTypeResource)
"""
/api/v1/tests/
GET [/id/], DELETE [/id/]
Expects: no params or engagement_id
Returns test: ALL or by engagement_id
Relevant apply filter ?test_type=?, ?id=?
POST, PUT [/id/]
Expects *test_type, *engagement, *target_start, *target_end,
estimated_time, actual_time, percent_complete, notes
"""
class TestResource(BaseModelResource):
engagement = fields.ForeignKey(EngagementResource, 'engagement',
full=False, null=False)
class Meta:
resource_name = 'tests'
list_allowed_methods = ['get', 'post']
# disabled delete. Should not be allowed without fine authorization.
detail_allowed_methods = ['get', 'post', 'put']
queryset = Test.objects.all().order_by('target_end')
include_resource_uri = True
filtering = {
'id': ALL,
'test_type': ALL,
'target_start': ALL,
'target_end': ALL,
'notes': ALL,
'percent_complete': ALL,
'actual_time': ALL,
'engagement': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=TestForm, resource=TestResource)
def dehydrate(self, bundle):
bundle.data['test_type'] = bundle.obj.test_type
return bundle
class RiskAcceptanceResource(BaseModelResource):
class Meta:
resource_name = 'risk_acceptances'
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
queryset = Risk_Acceptance.objects.all().order_by('created')
"""
/api/v1/findings/
GET [/id/], DELETE [/id/]
Expects: no params or test_id
Returns test: ALL or by test_id
Relevant apply filter ?active=True, ?id=?, ?severity=?
POST, PUT [/id/]
Expects *title, *date, *severity, *description, *mitigation, *impact,
*endpoint, *test, cwe, active, false_p, verified,
mitigated, *reporter
"""
class FindingResource(BaseModelResource):
reporter = fields.ForeignKey(UserResource, 'reporter', null=False)
test = fields.ForeignKey(TestResource, 'test', null=False)
# risk_acceptance = fields.ManyToManyField(RiskAcceptanceResource, 'risk_acceptance', full=True, null=True)
product = fields.ForeignKey(ProductResource, 'test__engagement__product', full=False, null=False)
engagement = fields.ForeignKey(EngagementResource, 'test__engagement', full=False, null=False)
class Meta:
resource_name = 'findings'
queryset = Finding.objects.select_related("test")
# deleting of findings is not allowed via API.
# Admin interface can be used for this.
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put']
include_resource_uri = True
filtering = {
'id': ALL,
'title': ALL,
'date': ALL,
'severity': ALL,
'description': ALL,
'mitigated': ALL,
'endpoint': ALL,
'test': ALL_WITH_RELATIONS,
'active': ALL,
'verified': ALL,
'false_p': ALL,
'reporter': ALL,
'url': ALL,
'out_of_scope': ALL,
'duplicate': ALL,
# 'risk_acceptance': ALL_WITH_RELATIONS,
'engagement': ALL_WITH_RELATIONS,
'product': ALL_WITH_RELATIONS
# 'build_id': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=FindingForm, resource=FindingResource)
def dehydrate(self, bundle):
engagement = Engagement.objects.select_related('product'). \
filter(test__finding__id=bundle.obj.id)
bundle.data['engagement'] = "/api/v1/engagements/%s/" % engagement[0].id
bundle.data['product'] = \
"/api/v1/products/%s/" % engagement[0].product.id
return bundle
"""
/api/v1/finding_templates/
GET [/id/], DELETE [/id/]
Expects: no params or test_id
Returns test: ALL or by test_id
Relevant apply filter ?active=True, ?id=?, ?severity=?
POST, PUT [/id/]
Expects *title, *severity, *description, *mitigation, *impact,
*endpoint, *test, cwe, active, false_p, verified,
mitigated, *reporter
"""
class FindingTemplateResource(BaseModelResource):
class Meta:
resource_name = 'finding_templates'
queryset = Finding_Template.objects.all()
excludes = ['numerical_severity']
# deleting of Finding_Template is not allowed via API.
# Admin interface can be used for this.
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put']
include_resource_uri = True
"""
title = models.TextField(max_length=1000)
cwe = models.IntegerField(default=None, null=True, blank=True)
severity = models.CharField(max_length=200, null=True, blank=True)
description = models.TextField(null=True, blank=True)
mitigation = models.TextField(null=True, blank=True)
impact = models.TextField(null=True, blank=True)
references = models.TextField(null=True, blank=True, db_column="refs")
numerical_severity
"""
filtering = {
'id': ALL,
'title': ALL,
'cwe': ALL,
'severity': ALL,
'description': ALL,
'mitigated': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=FindingTemplateForm, resource=FindingTemplateResource)
class StubFindingResource(BaseModelResource):
reporter = fields.ForeignKey(UserResource, 'reporter', null=False)
test = fields.ForeignKey(TestResource, 'test', null=False)
class Meta:
resource_name = 'stub_findings'
queryset = Stub_Finding.objects.select_related("test")
# deleting of findings is not allowed via UI or API.
# Admin interface can be used for this.
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put']
include_resource_uri = True
filtering = {
'id': ALL,
'title': ALL,
'date': ALL,
'severity': ALL,
'description': ALL,
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=StubFindingForm, resource=StubFindingResource)
def dehydrate(self, bundle):
engagement = Engagement.objects.select_related('product'). \
filter(test__stub_finding__id=bundle.obj.id)
bundle.data['engagement'] = "/api/v1/engagements/%s/" % engagement[0].id
bundle.data['product'] = \
"/api/v1/products/%s/" % engagement[0].product.id
return bundle
'''
/api/v1/scansettings/
GET [/id/], DELETE [/id/]
Expects: no params or product_id
Returns test: ALL or by product_id
POST, PUT [/id/]
Expects *addresses, *user, *date, *frequency, *email, *product
'''
class ScanSettingsResource(BaseModelResource):
user = fields.ForeignKey(UserResource, 'user', null=False)
product = fields.ForeignKey(ProductResource, 'product', null=False)
class Meta:
resource_name = 'scan_settings'
queryset = ScanSettings.objects.all()
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'put', 'post', 'delete']
include_resource_uri = True
filtering = {
'id': ALL,
'date': ALL,
'user': ALL,
'frequency': ALL,
'product': ALL,
'addresses': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = UserScanSettingsAuthorization()
serializer = Serializer(formats=['json'])
@property
def validation(self):
return ModelFormValidation(form_class=ScanSettingsForm, resource=ScanSettingsResource)
"""
/api/v1/ipscans/
Not exposed via API - but used as part of
ScanResource return values
"""
class IPScanResource(BaseModelResource):
class Meta:
resource_name = 'ipscans'
queryset = IPScan.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
include_resource_uri = True
filtering = {
'id': ALL,
'address': ALL,
'services': ALL,
'scan': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
serializer = Serializer(formats=['json'])
"""
/api/v1/scans/
GET [/id/], DELETE [/id/]
Expects: no params
Returns scans: ALL
Relevant filters: ?scan_setting=?
"""
class ScanResource(BaseModelResource):
scan_settings = fields.ForeignKey(ScanSettingsResource,
'scan_settings',
null=False)
ipscans = fields.ToManyField(
IPScanResource,
attribute=lambda bundle: IPScan.objects.filter(
scan__id=bundle.obj.id) if IPScan.objects.filter(
scan__id=bundle.obj.id) != [] else [], full=True, null=True)
class Meta:
resource_name = 'scans'
queryset = Scan.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
include_resource_uri = True
filtering = {
'id': ALL,
'date': ALL,
'scan_settings': ALL
}
authentication = DojoApiKeyAuthentication()
authorization = UserScanAuthorization()
serializer = Serializer(formats=['json'])
# Method used to get Private Key from uri, Used in the ImportScan and ReImportScan resources
def get_pk_from_uri(uri):
prefix = get_script_prefix()
chomped_uri = uri
if prefix and chomped_uri.startswith(prefix):
chomped_uri = chomped_uri[len(prefix) - 1:]
try:
view, args, kwargs = resolve(chomped_uri.replace('//', '/'))
except Resolver404:
raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
return kwargs['pk']
"""
/api/v1/importscan/
POST
Expects file, scan_date, scan_type, tags, active, engagement
"""
# Create an Object that will store all the information sent to the endpoint
class ImportScanObject(object):
def __init__(self, initial=None):
self.__dict__['_data'] = {}
if initial:
self.update(initial)
def __getattr__(self, name):
return self._data.get(name, None)
def __setattr__(self, name, value):
self.__dict__['_data'][name] = value
def update(self, other):
for k in other:
self.__setattr__(k, other[k])
def to_dict(self):
return self._data
# The default form validation was buggy so I implemented a custom validation class
class ImportScanValidation(Validation):
def is_valid(self, bundle, request=None):
if not bundle.data:
return {'__all__': 'You didn\'t seem to pass anything in.'}
errors = {}
# Make sure file is present
if 'file' not in bundle.data:
errors.setdefault('file', []).append('You must pass a file in to be imported')
# Make sure scan_date matches required format
if 'scan_date' in bundle.data:
try:
datetime.strptime(bundle.data['scan_date'], '%Y-%m-%d')
except ValueError:
errors.setdefault('scan_date', []).append("Incorrect scan_date format, should be YYYY-MM-DD")
# Make sure scan_type and minimum_severity have valid options
if 'engagement' not in bundle.data:
errors.setdefault('engagement', []).append('engagement must be given')
else:
# verify the engagement is valid
try:
get_pk_from_uri(uri=bundle.data['engagement'])
except NotFound:
errors.setdefault('engagement', []).append('A valid engagement must be supplied. Ex. /api/v1/engagements/1/')
scan_type_list = list([x[0] for x in ImportScanForm.SCAN_TYPE_CHOICES])
if 'scan_type' in bundle.data:
if bundle.data['scan_type'] not in scan_type_list:
errors.setdefault('scan_type', []).append('scan_type must be one of the following: ' + ', '.join(scan_type_list))
else:
errors.setdefault('scan_type', []).append('A scan_type must be given so we know how to import the scan file.')
try:
if 'test_type' in bundle.data:
Test_Type.objects.get(name=bundle.data.get('test_type'))
else:
bundle.data['test_type'] = bundle.data.get('scan_type')
except Test_Type.DoesNotExist:
errors.setdefault('test_type', []).append(
'test_type must be one of the following: ' +
', '.join(Test_Type.objects.values_list("name", flat=True)))
severity_list = list([x[0] for x in SEVERITY_CHOICES])
if 'minimum_severity' in bundle.data:
if bundle.data['minimum_severity'] not in severity_list:
errors.setdefault('minimum_severity', []).append('minimum_severity must be one of the following: ' + ', '.join(severity_list))
# Make sure active and verified are booleans
if 'active' in bundle.data:
if bundle.data['active'] in ['false', 'False', '0']:
bundle.data['active'] = False
elif bundle.data['active'] in ['true', 'True', '1']:
bundle.data['active'] = True
if not isinstance(bundle.data['active'], bool):
errors.setdefault('active', []).append('active must be a boolean')
if 'verified' in bundle.data:
if bundle.data['verified'] in ['false', 'False', '0']:
bundle.data['verified'] = False
elif bundle.data['verified'] in ['true', 'True', '1']:
bundle.data['verified'] = True
if not isinstance(bundle.data['verified'], bool):
errors.setdefault('verified', []).append('verified must be a boolean')
return errors
class BuildDetails(MultipartResource, Resource):
file = fields.FileField(attribute='file')
engagement = fields.CharField(attribute='engagement')
class Meta:
resource_name = 'build_details'
fields = ['engagement', 'file']
list_allowed_methods = ['post']
detail_allowed_methods = []
include_resource_uri = True
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
object_class = ImportScanObject
def hydrate(self, bundle):
bundle.obj.__setattr__('engagement_obj',
Engagement.objects.get(id=get_pk_from_uri(bundle.data['engagement'])))
return bundle
def obj_create(self, bundle, **kwargs):
bundle.obj = ImportScanObject(initial=kwargs)
self.is_valid(bundle)
if bundle.errors:
raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
bundle = self.full_hydrate(bundle)
import_object_eng(bundle.request, bundle.obj.__getattr__('engagement_obj'), bundle.data['file'])
class ImportScanResource(MultipartResource, Resource):
scan_date = fields.DateTimeField(attribute='scan_date')
minimum_severity = fields.CharField(attribute='minimum_severity')
active = fields.BooleanField(attribute='active')
verified = fields.BooleanField(attribute='verified')
scan_type = fields.CharField(attribute='scan_type')
test_type = fields.CharField(attribute='test_type')
tags = fields.CharField(attribute='tags')
file = fields.FileField(attribute='file')
engagement = fields.CharField(attribute='engagement')
lead = fields.CharField(attribute='lead')
class Meta:
resource_name = 'importscan'
fields = ['scan_date', 'minimum_severity', 'active', 'verified', 'scan_type', 'tags', 'file', 'lead']
list_allowed_methods = ['post']
detail_allowed_methods = []
include_resource_uri = True
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
validation = ImportScanValidation()
object_class = ImportScanObject
def hydrate(self, bundle):
if 'scan_date' not in bundle.data:
bundle.data['scan_date'] = datetime.now().strftime("%Y/%m/%d")
if 'minimum_severity' not in bundle.data:
bundle.data['minimum_severity'] = "Info"
if 'active' not in bundle.data:
bundle.data['active'] = True
if 'verified' not in bundle.data:
bundle.data['verified'] = True
if 'tags' not in bundle.data:
bundle.data['tags'] = ""
if 'lead' in bundle.data:
bundle.obj.__setattr__('user_obj',
User.objects.get(id=get_pk_from_uri(bundle.data['lead'])))
bundle.obj.__setattr__('engagement_obj',
Engagement.objects.get(id=get_pk_from_uri(bundle.data['engagement'])))
return bundle
def detail_uri_kwargs(self, bundle_or_obj):
kwargs = {}
return kwargs
def obj_create(self, bundle, **kwargs):
bundle.obj = ImportScanObject(initial=kwargs)
self.is_valid(bundle)
if bundle.errors:
raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
bundle = self.full_hydrate(bundle)
# We now have all the options we need and will just replicate the process in views.py
tt, t_created = Test_Type.objects.get_or_create(name=bundle.data.get('test_type', bundle.data['scan_type']))
# will save in development environment
environment, env_created = Development_Environment.objects.get_or_create(name="Development")
scan_date = datetime.strptime(bundle.data['scan_date'], '%Y-%m-%d')
t = Test(engagement=bundle.obj.__getattr__('engagement_obj'), lead=bundle.obj.__getattr__('user_obj'), test_type=tt, target_start=scan_date,
target_end=scan_date, environment=environment, percent_complete=100)
try:
t.full_clean()
except ValidationError as e:
print("Error Validating Test Object")
print(e)
t.save()
t.tags = bundle.data['tags']
try:
parser = import_parser_factory(bundle.data['file'], t, True, True, bundle.data['scan_type'])
except ValueError:
raise NotFound("Parser ValueError")
try:
for item in parser.items:
sev = item.severity
if sev == 'Information' or sev == 'Informational':
sev = 'Info'
item.severity = sev
if Finding.SEVERITIES[sev] > Finding.SEVERITIES[bundle.data['minimum_severity']]:
continue
item.test = t
item.date = t.target_start
item.reporter = bundle.request.user
item.last_reviewed = timezone.now()
item.last_reviewed_by = bundle.request.user
item.active = bundle.data['active']
item.verified = bundle.data['verified']
item.save(dedupe_option=False)
if hasattr(item, 'unsaved_req_resp') and len(item.unsaved_req_resp) > 0:
for req_resp in item.unsaved_req_resp:
burp_rr = BurpRawRequestResponse(finding=item,
burpRequestBase64=req_resp["req"],
burpResponseBase64=req_resp["resp"],
)
burp_rr.clean()
burp_rr.save()
if item.unsaved_request is not None and item.unsaved_response is not None:
burp_rr = BurpRawRequestResponse(finding=item,
burpRequestBase64=item.unsaved_request,
burpResponseBase64=item.unsaved_response,
)
burp_rr.clean()
burp_rr.save()
for endpoint in item.unsaved_endpoints:
ep, created = Endpoint.objects.get_or_create(protocol=endpoint.protocol,
host=endpoint.host,
path=endpoint.path,
query=endpoint.query,
fragment=endpoint.fragment,
product=t.engagement.product)
item.endpoints.add(ep)
item.save()
if item.unsaved_tags is not None:
item.tags = item.unsaved_tags
except SyntaxError:
raise NotFound("Parser SyntaxError")
# Everything executed fine. We successfully imported the scan.
res = TestResource()
uri = res.get_resource_uri(t)
raise ImmediateHttpResponse(HttpCreated(location=uri))
# The default form validation was buggy so I implemented a custom validation class
class ReImportScanValidation(Validation):
def is_valid(self, bundle, request=None):
if not bundle.data:
return {'__all__': 'You didn\'t seem to pass anything in.'}
errors = {}
# Make sure file is present
if 'file' not in bundle.data:
errors.setdefault('file', []).append('You must pass a file in to be imported')
# Make sure scan_date matches required format
if 'scan_date' in bundle.data:
try:
datetime.strptime(bundle.data['scan_date'], '%Y/%m/%d')
except ValueError:
errors.setdefault('scan_date', []).append("Incorrect scan_date format, should be YYYY/MM/DD")
# Make sure scan_type and minimum_severity have valid options
if 'test' not in bundle.data:
errors.setdefault('test', []).append('test must be given')
else:
# verify the test is valid
try:
get_pk_from_uri(uri=bundle.data['test'])
except NotFound:
errors.setdefault('test', []).append('A valid test must be supplied. Ex. /api/v1/tests/1/')
scan_type_list = list([x[0] for x in ImportScanForm.SCAN_TYPE_CHOICES])
if 'scan_type' in bundle.data:
if bundle.data['scan_type'] not in scan_type_list:
errors.setdefault('scan_type', []).append('scan_type must be one of the following: ' + ', '.join(scan_type_list))
else:
errors.setdefault('scan_type', []).append('A scan_type must be given so we know how to import the scan file.')
severity_list = list([x[0] for x in SEVERITY_CHOICES])
if 'minimum_severity' in bundle.data:
if bundle.data['minimum_severity'] not in severity_list:
errors.setdefault('minimum_severity', []).append('minimum_severity must be one of the following: ' + ', '.join(severity_list))
# Make sure active and verified are booleans
if 'active' in bundle.data:
if bundle.data['active'] in ['false', 'False', '0']:
bundle.data['active'] = False
elif bundle.data['active'] in ['true', 'True', '1']:
bundle.data['active'] = True
if not isinstance(bundle.data['active'], bool):
errors.setdefault('active', []).append('active must be a boolean')
if 'verified' in bundle.data:
if bundle.data['verified'] in ['false', 'False', '0']:
bundle.data['verified'] = False
elif bundle.data['verified'] in ['true', 'True', '1']:
bundle.data['verified'] = True
if not isinstance(bundle.data['verified'], bool):
errors.setdefault('verified', []).append('verified must be a boolean')
return errors
class ReImportScanResource(MultipartResource, Resource):
scan_date = fields.DateTimeField(attribute='scan_date')
minimum_severity = fields.CharField(attribute='minimum_severity')
active = fields.BooleanField(attribute='active')
verified = fields.BooleanField(attribute='verified')
scan_type = fields.CharField(attribute='scan_type')
tags = fields.CharField(attribute='tags')
file = fields.FileField(attribute='file')
test = fields.CharField(attribute='test')
class Meta:
resource_name = 'reimportscan'
fields = ['scan_date', 'minimum_severity', 'active', 'verified', 'scan_type', 'tags', 'file']
list_allowed_methods = ['post']
detail_allowed_methods = []
include_resource_uri = True
authentication = DojoApiKeyAuthentication()
authorization = DjangoAuthorization()
validation = ReImportScanValidation()
object_class = ImportScanObject
def hydrate(self, bundle):
if 'scan_date' not in bundle.data:
bundle.data['scan_date'] = datetime.now().strftime("%Y/%m/%d")
if 'minimum_severity' not in bundle.data:
bundle.data['minimum_severity'] = "Info"
if 'active' not in bundle.data:
bundle.data['active'] = True
if 'verified' not in bundle.data:
bundle.data['verified'] = True
if 'tags' not in bundle.data:
bundle.data['tags'] = ""
bundle.obj.__setattr__('test_obj',
Test.objects.get(id=get_pk_from_uri(bundle.data['test'])))
return bundle
def detail_uri_kwargs(self, bundle_or_obj):
kwargs = {}
return kwargs
def obj_create(self, bundle, **kwargs):
bundle.obj = ImportScanObject(initial=kwargs)
self.is_valid(bundle)
if bundle.errors:
raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
bundle = self.full_hydrate(bundle)
test = bundle.obj.__getattr__('test_obj')
scan_type = bundle.obj.__getattr__('scan_type')
min_sev = bundle.obj.__getattr__('minimum_severity')
scan_date = bundle.obj.__getattr__('scan_date')
verified = bundle.obj.__getattr__('verified')
active = bundle.obj.__getattr__('active')
try:
parser = import_parser_factory(bundle.data['file'], test, True, True, scan_type)
except ValueError:
raise NotFound("Parser ValueError")
try:
items = parser.items
original_items = test.finding_set.all().values_list("id", flat=True)
new_items = []
mitigated_count = 0
finding_count = 0
finding_added_count = 0
reactivated_count = 0
for item in items:
sev = item.severity
if sev == 'Information' or sev == 'Informational':
sev = 'Info'
if Finding.SEVERITIES[sev] > Finding.SEVERITIES[min_sev]:
continue
if scan_type == 'Veracode Scan' or scan_type == 'Arachni Scan':
find = Finding.objects.filter(title=item.title,
test__id=test.id,
severity=sev,
numerical_severity=Finding.get_numerical_severity(sev),
description=item.description
)
else:
find = Finding.objects.filter(title=item.title,
test__id=test.id,
severity=sev,
numerical_severity=Finding.get_numerical_severity(sev),
)
if len(find) == 1:
find = find[0]
if find.mitigated:
# it was once fixed, but now back
find.mitigated = None
find.mitigated_by = None
find.active = True
find.verified = verified
find.save()
note = Notes(entry="Re-activated by %s re-upload." % scan_type,
author=bundle.request.user)
note.save()
find.notes.add(note)
reactivated_count += 1
new_items.append(find.id)
else:
item.test = test
item.date = test.target_start
item.reporter = bundle.request.user
item.last_reviewed = timezone.now()
item.last_reviewed_by = bundle.request.user
item.verified = verified
item.active = active
item.save()
finding_added_count += 1
new_items.append(item.id)
find = item
if hasattr(item, 'unsaved_req_resp') and len(item.unsaved_req_resp) > 0:
for req_resp in item.unsaved_req_resp:
burp_rr = BurpRawRequestResponse(finding=find,
burpRequestBase64=req_resp["req"],
burpResponseBase64=req_resp["resp"],
)
burp_rr.clean()
burp_rr.save()
if item.unsaved_request is not None and item.unsaved_response is not None:
burp_rr = BurpRawRequestResponse(finding=find,
burpRequestBase64=item.unsaved_request,
burpResponseBase64=item.unsaved_response,
)
burp_rr.clean()
burp_rr.save()
if find:
finding_count += 1
for endpoint in item.unsaved_endpoints:
ep, created = Endpoint.objects.get_or_create(protocol=endpoint.protocol,
host=endpoint.host,
path=endpoint.path,
query=endpoint.query,
fragment=endpoint.fragment,
product=test.engagement.product)
find.endpoints.add(ep)
if item.unsaved_tags is not None:
find.tags = item.unsaved_tags
# calculate the difference
to_mitigate = set(original_items) - set(new_items)
for finding_id in to_mitigate:
finding = Finding.objects.get(id=finding_id)
finding.mitigated = datetime.combine(scan_date, timezone.now().time())
finding.mitigated_by = bundle.request.user
finding.active = False
finding.save()
note = Notes(entry="Mitigated by %s re-upload." % scan_type,
author=bundle.request.user)
note.save()
finding.notes.add(note)
mitigated_count += 1
except SyntaxError:
raise NotFound("Parser SyntaxError")
# Everything executed fine. We successfully imported the scan.
raise ImmediateHttpResponse(HttpCreated(location=bundle.obj.__getattr__('test')))
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import mock
import unittest
from rdflib import Graph, Namespace
from superphy.upload import classes
__author__ = "Stephen Kan"
__copyright__ = "© Copyright Government of Canada 2012-2015. Funded by the Government of Canada Genomics Research and Development Initiative"
__license__ = "ASL"
__version__ = "2.0"
__maintainer__ = "Stephen Kan"
__email__ = "stebokan@gmail.com"
n = Namespace("https://github.com/superphy#")
owl = Namespace("http://www.w3.org/2002/07/owl#")
rdf = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
xml = Namespace("http://www.w3.org/XML/1998/namespace")
xsd = Namespace("http://www.w3.org/2001/XMLSchema#")
rdfs = Namespace("http://www.w3.org/2000/01/rdf-schema#")
gfvo = Namespace("http://www.biointerchange.org/gfvo#")
faldo = Namespace("http://biohackathon.org/resource/faldo#")
class ClassesTestCase(unittest.TestCase):
def setUp(self):
self.graph = Graph()
def tearDown(self):
self.graph.remove((None, None, None))
del self.graph
def check_triples(self, fields, outputs):
print len(fields), len(outputs)
self.assertTrue(len(outputs) is len(fields))
for item in fields:
self.assertTrue(item in str(outputs))
# used for generating results for test to compare against (please manually confirm validity before using)
def temp_print(self, name):
print list(self.graph.objects(n[name]))
print list(self.graph.subjects(object=n[name]))
print self.graph.serialize(format="turtle")
def test_NamedIndividual(self):
instance = classes.NamedIndividual(self.graph, "foobar")
instance.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual"]
objects = list(self.graph.objects(n["foobar"]))
self.check_triples(fields, objects)
def test_User(self):
user = classes.User(self.graph, "test@test.com")
user.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual",
"test@test.com",
"https://github.com/superphy#User"]
objects = list(self.graph.objects(n["test@test.com"]))
self.check_triples(fields, objects)
def test_Organism(self):
organism = classes.Organism(self.graph, "ecoli", "Escherichia coli (E. coli)", "Escherichia coli", "E. coli", 562)
organism.rdf()
fields = ["E. coli",
"Escherichia coli",
"562",
"Escherichia coli (E. coli)",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#Organism"]
objects = list(self.graph.objects(n["ecoli"]))
self.check_triples(fields, objects)
def test_Host(self):
host = classes.Host(self.graph, "hsapiens", "Homo sapiens (human)", "Homo sapiens", "human", "human", 9606)
host.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#Host",
"Homo sapiens (human)",
"9606",
"https://github.com/superphy#human",
"Homo sapiens",
"https://github.com/superphy#Organism",
"https://github.com/superphy#from_hsapiens",
"human"]
objects = list(self.graph.objects(n["hsapiens"]))
self.check_triples(fields, objects)
fields = ["https://github.com/superphy#from_hsapiens",
"https://github.com/superphy#human"]
subjects = list(self.graph.subjects(object=n["hsapiens"]))
self.check_triples(fields, subjects)
def test_Microbe(self):
microbe = classes.Microbe(self.graph, "ecoli", "Escherichia coli (E. coli)", "Escherichia coli", "E. coli", 562)
microbe.rdf()
fields = ["https://github.com/superphy#Organism",
"Escherichia coli",
"E. coli",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"562",
"Escherichia coli (E. coli)",
"https://github.com/superphy#Microbe"]
objects = list(self.graph.objects(n["ecoli"]))
self.check_triples(fields, objects)
def test_Attribute(self):
attribute = classes.Attribute(self.graph, "attribute")
attribute.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#Attribute"]
objects = list(self.graph.objects(n["attribute"]))
self.check_triples(fields, objects)
def test_HostCategory(self):
hostcategory = classes.HostCategory(self.graph, "human", "Human")
hostcategory.rdf()
fields = ["Human",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#host_category"]
objects = list(self.graph.objects(n["human"]))
self.check_triples(fields, objects)
def test_IsolationAttribute(self):
isolation_attribute = classes.IsolationAttribute(self.graph, "from_hsapiens")
isolation_attribute.rdf()
fields = ["https://github.com/superphy#Attribute",
"https://github.com/superphy#isolation_attribute",
"http://www.w3.org/2002/07/owl#NamedIndividual"]
objects = list(self.graph.objects(n["from_hsapiens"]))
self.check_triples(fields, objects)
def test_FromHost(self):
from_host = classes.FromHost(self.graph, "hsapiens", "human")
from_host.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#Attribute",
"https://github.com/superphy#isolation_attribute",
"https://github.com/superphy#isolation_from_host",
"https://github.com/superphy#human",
"https://github.com/superphy#hsapiens"]
objects = list(self.graph.objects(n["from_hsapiens"]))
self.check_triples(fields, objects)
fields = ["https://github.com/superphy#human",
"https://github.com/superphy#hsapiens"]
subjects = list(self.graph.subjects(object=n["from_hsapiens"]))
self.check_triples(fields, subjects)
def test_FromSource(self):
from_source = classes.FromSource(self.graph, "stool", "Stool", "human")
from_source.rdf()
fields = ["https://github.com/superphy#Attribute",
"https://github.com/superphy#human",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"Stool",
"https://github.com/superphy#isolation_from_source",
"https://github.com/superphy#isolation_attribute"]
objects = list(self.graph.objects(n["stool"]))
self.check_triples(fields, objects)
fields = ["https://github.com/superphy#human"]
subjects = list(self.graph.subjects(object=n["stool"]))
self.check_triples(fields, subjects)
def test_IsolationSyndrome(self):
isolation_syndrome = classes.IsolationSyndrome(self.graph, "meningitis", "Meningitis", "human")
isolation_syndrome.rdf()
fields = ["https://github.com/superphy#isolation_attribute",
"https://github.com/superphy#Attribute",
"https://github.com/superphy#human",
"Meningitis",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#isolation_syndrome"]
objects = list(self.graph.objects(n["meningitis"]))
self.check_triples(fields, objects)
fields = ["https://github.com/superphy#human"]
subjects = list(self.graph.subjects(object=n["meningitis"]))
self.check_triples(fields, subjects)
def test_Serotype(self):
serotype = classes.Serotype(self.graph, "OUnknown")
serotype.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#serotype",
"https://github.com/superphy#Attribute"]
objects = list(self.graph.objects(n["OUnknown"]))
self.check_triples(fields, objects)
def test_Otype(self):
otype = classes.Otype(self.graph, 157)
otype.rdf()
fields =["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#serotype",
"https://github.com/superphy#Attribute",
"https://github.com/superphy#Otype",
"157"]
objects = list(self.graph.objects(n["O157"]))
self.check_triples(fields, objects)
def test_Htype(self):
htype = classes.Htype(self.graph, 7)
htype.rdf()
fields =["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#serotype",
"https://github.com/superphy#Attribute",
"https://github.com/superphy#Htype",
"7"]
objects = list(self.graph.objects(n["H7"]))
self.check_triples(fields, objects)
@mock.patch("superphy.upload._sparql.find_syndrome")
@mock.patch("superphy.upload._sparql.find_source")
@mock.patch("superphy.upload._sparql.find_from_host")
def test_Genome(self, mock_host, mock_source, mock_syndrome):
kwargs = {"isolation_date": {"2013-06-24"},
"isolation_location": {"United States, California, Santa Clara"},
"accession": {"JNOG00000000"},
"bioproject": {"251898"},
"biosample": {"2841129"},
"strain": {"CS03"},
"organism": "ecoli",
"isolation_host": {"Homo sapiens (human)"},
"isolation_source": {"Feces"},
"syndrome": {"Urinary tract infection (cystitis)"},
"Htype": "-",
"Otype": None,
}
mock_host.return_value = "https://github.com/superphy#from_hsapiens"
mock_source.return_value = "https://github.com/superphy#feces"
mock_syndrome.return_value = "https://github.com/superphy#uti"
genome = classes.Genome(self.graph, "JNOG00000000", **kwargs)
genome.rdf()
self.assertEqual(len(mock_host.mock_calls), 1)
self.assertEqual(len(mock_source.mock_calls), 1)
self.assertEqual(len(mock_syndrome.mock_calls), 1)
field = {"https://github.com/superphy#H-",
"https://github.com/superphy#from_hsapiens",
"United States, California, Santa Clara",
"JNOG00000000",
"https://github.com/superphy#ecoli",
"https://github.com/superphy#feces",
"2841129",
"https://github.com/superphy#uti",
"251898",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"CS03",
"2013-06-24",
"http://www.biointerchange.org/gfvo#Genome"}
objects = list(self.graph.objects(n["JNOG00000000"]))
self.check_triples(field, objects)
field = {"https://github.com/superphy#uti",
"https://github.com/superphy#from_hsapiens",
"https://github.com/superphy#H-",
"https://github.com/superphy#feces",
"https://github.com/superphy#ecoli"}
subjects = list(self.graph.subjects(object=n["JNOG00000000"]))
self.check_triples(field, subjects)
@mock.patch("superphy.upload._sparql.find_syndrome")
@mock.patch("superphy.upload._sparql.find_source")
@mock.patch("superphy.upload._sparql.find_from_host")
def test_PendingGenome(self, mock_host, mock_source, mock_syndrome):
kwargs = {"isolation_date": {"2013-06-24"},
"isolation_location": {"United States, California, Santa Clara"},
"accession": {"JNOG00000000"},
"bioproject": {"251898"},
"biosample": {"2841129"},
"strain": {"CS03"},
"organism": "ecoli",
"isolation_host": {"Homo sapiens (human)"},
"isolation_source": {"Feces"},
"syndrome": {"Urinary tract infection (cystitis)"},
"Htype": "-",
"Otype": None,
}
mock_host.return_value = "https://github.com/superphy#from_hsapiens"
mock_source.return_value = "https://github.com/superphy#feces"
mock_syndrome.return_value = "https://github.com/superphy#uti"
pending_genome = classes.PendingGenome(self.graph, "JNOG00000000", **kwargs)
pending_genome.rdf()
self.assertEqual(len(mock_host.mock_calls), 1)
self.assertEqual(len(mock_source.mock_calls), 1)
self.assertEqual(len(mock_syndrome.mock_calls), 1)
field = {"https://github.com/superphy#H-",
"https://github.com/superphy#from_hsapiens",
"United States, California, Santa Clara",
"JNOG00000000",
"https://github.com/superphy#ecoli",
"https://github.com/superphy#feces",
"2841129",
"https://github.com/superphy#uti",
"251898",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"CS03",
"2013-06-24",
"http://www.biointerchange.org/gfvo#Genome",
"https://github.com/superphy#pending_genome"}
objects = list(self.graph.objects(n["JNOG00000000"]))
self.check_triples(field, objects)
field = {"https://github.com/superphy#uti",
"https://github.com/superphy#from_hsapiens",
"https://github.com/superphy#H-",
"https://github.com/superphy#feces",
"https://github.com/superphy#ecoli"}
subjects = list(self.graph.subjects(object=n["JNOG00000000"]))
self.check_triples(field, subjects)
@mock.patch("superphy.upload._sparql.find_syndrome")
@mock.patch("superphy.upload._sparql.find_source")
@mock.patch("superphy.upload._sparql.find_from_host")
def test_CompletedGenome(self, mock_host, mock_source, mock_syndrome):
kwargs = {"isolation_date": {"2013-06-24"},
"isolation_location": {"United States, California, Santa Clara"},
"accession": {"JNOG00000000"},
"bioproject": {"251898"},
"biosample": {"2841129"},
"strain": {"CS03"},
"organism": "ecoli",
"isolation_host": {"Homo sapiens (human)"},
"isolation_source": {"Feces"},
"syndrome": {"Urinary tract infection (cystitis)"},
"Htype": "-",
"Otype": None,
}
mock_host.return_value = "https://github.com/superphy#from_hsapiens"
mock_source.return_value = "https://github.com/superphy#feces"
mock_syndrome.return_value = "https://github.com/superphy#uti"
completed_genome = classes.CompletedGenome(self.graph, "JNOG00000000", **kwargs)
completed_genome.rdf()
self.assertEqual(len(mock_host.mock_calls), 1)
self.assertEqual(len(mock_source.mock_calls), 1)
self.assertEqual(len(mock_syndrome.mock_calls), 1)
field = {"https://github.com/superphy#H-",
"https://github.com/superphy#from_hsapiens",
"United States, California, Santa Clara",
"JNOG00000000",
"https://github.com/superphy#ecoli",
"https://github.com/superphy#feces",
"2841129",
"https://github.com/superphy#uti",
"251898",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"CS03",
"2013-06-24",
"http://www.biointerchange.org/gfvo#Genome",
"https://github.com/superphy#completed_genome"}
objects = list(self.graph.objects(n["JNOG00000000"]))
self.check_triples(field, objects)
field = {"https://github.com/superphy#uti",
"https://github.com/superphy#from_hsapiens",
"https://github.com/superphy#H-",
"https://github.com/superphy#feces",
"https://github.com/superphy#ecoli"}
subjects = list(self.graph.subjects(object=n["JNOG00000000"]))
self.check_triples(field, subjects)
@mock.patch("superphy.upload._sparql.check_validation")
def test_Contig(self, mock_validation):
# Testing general contig without sequence validation
contig = classes.Contig(self.graph, "ANVW01000001", "ANVW00000000", "ATGATGATGATAGAGATGAGAT", "WGS")
contig.rdf()
field = {"http://www.w3.org/2002/07/owl#NamedIndividual",
"http://www.biointerchange.org/gfvo#Contig",
"ATGATGATGATAGAGATGAGAT",
"WGS",
"https://github.com/superphy#ANVW00000000"
}
objects = list(self.graph.objects(n["ANVW01000001"]))
self.check_triples(field, objects)
# Testing a false validation with the genome validated (i.e. skips if case)
mock_validation.return_value = True
contig.add_seq_validation(False)
self.assertEqual(len(mock_validation.mock_calls), 1)
field = {"False"}
objects = list(self.graph.objects(predicate=n.has_valid_sequence))
self.check_triples(field, objects)
# Testing a true validation
mock_validation.reset_mock()
mock_validation.return_value = False
contig.add_seq_validation(True)
field = {"True"}
objects = list(self.graph.objects(subject=n["ANVW00000000"], predicate=n.has_valid_sequence))
for thing in objects:
print thing
self.check_triples(field, objects)
def test_Gene(self):
kwargs = {"category": {"Adherence"},
"subcategory": {"AAF_II_fimbriae"},
"gene_type": {"virulence_factor"},
"vfo_id": {"3000001"}
}
gene = classes.Gene(self.graph, "hlyA", **kwargs)
gene.rdf()
field = {"http://www.biointerchange.org/gfvo#gene",
"3000001",
"https://github.com/superphy#Adherence",
"https://github.com/superphy#AAF_II_fimbriae",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#virulence_factor"
}
objects = list(self.graph.objects(n["hlyA"]))
self.check_triples(field, objects)
def test_GeneLocation(self):
# Testing a reference gene
genelocation = classes.GeneLocation(self.graph, "senB_ADUP01000050_0", "senB", "ADUP01000050", "146606", "147859",
"ATGGATATTTGGCGGGGACATTCGTTTCTGATGACAATTTCCGCT", True)
genelocation.rdf()
field = {"http://www.w3.org/2002/07/owl#NamedIndividual",
"http://biohackathon.org/resource/faldo#Region",
"https://github.com/superphy#ADUP01000050",
"https://github.com/superphy#reference_gene",
"https://github.com/superphy#senB_ADUP01000050_0_begin",
"https://github.com/superphy#senB_ADUP01000050_0_end",
"ATGGATATTTGGCGGGGACATTCGTTTCTGATGACAATTTCCGCT"
}
objects = list(self.graph.objects(n["senB_ADUP01000050_0"]))
self.check_triples(field, objects)
field = ["https://github.com/superphy#senB_ADUP01000050_0",
"http://www.biointerchange.org/gfvo#gene",
"https://github.com/superphy#senB_ADUP01000050_0"]
objects = list(self.graph.objects(n["senB"]))
self.check_triples(field, objects)
field = {"http://biohackathon.org/resource/faldo#Position",
"http://biohackathon.org/resource/faldo#ExactPosition",
"http://biohackathon.org/resource/faldo#ForwardStrandPosition",
"146606",
"https://github.com/superphy#ADUP01000050"
}
objects = list(self.graph.objects(n["senB_ADUP01000050_0_begin"]))
for thing in objects:
print thing
self.check_triples(field, objects)
if __name__ == '__main__':
unittest.main()
Fixes test for change in gene class
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import mock
import unittest
from rdflib import Graph, Namespace
from superphy.upload import classes
__author__ = "Stephen Kan"
__copyright__ = "© Copyright Government of Canada 2012-2015. Funded by the Government of Canada Genomics Research and Development Initiative"
__license__ = "ASL"
__version__ = "2.0"
__maintainer__ = "Stephen Kan"
__email__ = "stebokan@gmail.com"
n = Namespace("https://github.com/superphy#")
owl = Namespace("http://www.w3.org/2002/07/owl#")
rdf = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
xml = Namespace("http://www.w3.org/XML/1998/namespace")
xsd = Namespace("http://www.w3.org/2001/XMLSchema#")
rdfs = Namespace("http://www.w3.org/2000/01/rdf-schema#")
gfvo = Namespace("http://www.biointerchange.org/gfvo#")
faldo = Namespace("http://biohackathon.org/resource/faldo#")
class ClassesTestCase(unittest.TestCase):
def setUp(self):
self.graph = Graph()
def tearDown(self):
self.graph.remove((None, None, None))
del self.graph
def check_triples(self, fields, outputs):
print len(fields), len(outputs)
self.assertTrue(len(outputs) is len(fields))
for item in fields:
self.assertTrue(item in str(outputs))
# used for generating results for test to compare against (please manually confirm validity before using)
def temp_print(self, name):
print list(self.graph.objects(n[name]))
print list(self.graph.subjects(object=n[name]))
print self.graph.serialize(format="turtle")
def test_NamedIndividual(self):
instance = classes.NamedIndividual(self.graph, "foobar")
instance.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual"]
objects = list(self.graph.objects(n["foobar"]))
self.check_triples(fields, objects)
def test_User(self):
user = classes.User(self.graph, "test@test.com")
user.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual",
"test@test.com",
"https://github.com/superphy#User"]
objects = list(self.graph.objects(n["test@test.com"]))
self.check_triples(fields, objects)
def test_Organism(self):
organism = classes.Organism(self.graph, "ecoli", "Escherichia coli (E. coli)", "Escherichia coli", "E. coli", 562)
organism.rdf()
fields = ["E. coli",
"Escherichia coli",
"562",
"Escherichia coli (E. coli)",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#Organism"]
objects = list(self.graph.objects(n["ecoli"]))
self.check_triples(fields, objects)
def test_Host(self):
host = classes.Host(self.graph, "hsapiens", "Homo sapiens (human)", "Homo sapiens", "human", "human", 9606)
host.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#Host",
"Homo sapiens (human)",
"9606",
"https://github.com/superphy#human",
"Homo sapiens",
"https://github.com/superphy#Organism",
"https://github.com/superphy#from_hsapiens",
"human"]
objects = list(self.graph.objects(n["hsapiens"]))
self.check_triples(fields, objects)
fields = ["https://github.com/superphy#from_hsapiens",
"https://github.com/superphy#human"]
subjects = list(self.graph.subjects(object=n["hsapiens"]))
self.check_triples(fields, subjects)
def test_Microbe(self):
microbe = classes.Microbe(self.graph, "ecoli", "Escherichia coli (E. coli)", "Escherichia coli", "E. coli", 562)
microbe.rdf()
fields = ["https://github.com/superphy#Organism",
"Escherichia coli",
"E. coli",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"562",
"Escherichia coli (E. coli)",
"https://github.com/superphy#Microbe"]
objects = list(self.graph.objects(n["ecoli"]))
self.check_triples(fields, objects)
def test_Attribute(self):
attribute = classes.Attribute(self.graph, "attribute")
attribute.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#Attribute"]
objects = list(self.graph.objects(n["attribute"]))
self.check_triples(fields, objects)
def test_HostCategory(self):
hostcategory = classes.HostCategory(self.graph, "human", "Human")
hostcategory.rdf()
fields = ["Human",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#host_category"]
objects = list(self.graph.objects(n["human"]))
self.check_triples(fields, objects)
def test_IsolationAttribute(self):
isolation_attribute = classes.IsolationAttribute(self.graph, "from_hsapiens")
isolation_attribute.rdf()
fields = ["https://github.com/superphy#Attribute",
"https://github.com/superphy#isolation_attribute",
"http://www.w3.org/2002/07/owl#NamedIndividual"]
objects = list(self.graph.objects(n["from_hsapiens"]))
self.check_triples(fields, objects)
def test_FromHost(self):
from_host = classes.FromHost(self.graph, "hsapiens", "human")
from_host.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#Attribute",
"https://github.com/superphy#isolation_attribute",
"https://github.com/superphy#isolation_from_host",
"https://github.com/superphy#human",
"https://github.com/superphy#hsapiens"]
objects = list(self.graph.objects(n["from_hsapiens"]))
self.check_triples(fields, objects)
fields = ["https://github.com/superphy#human",
"https://github.com/superphy#hsapiens"]
subjects = list(self.graph.subjects(object=n["from_hsapiens"]))
self.check_triples(fields, subjects)
def test_FromSource(self):
from_source = classes.FromSource(self.graph, "stool", "Stool", "human")
from_source.rdf()
fields = ["https://github.com/superphy#Attribute",
"https://github.com/superphy#human",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"Stool",
"https://github.com/superphy#isolation_from_source",
"https://github.com/superphy#isolation_attribute"]
objects = list(self.graph.objects(n["stool"]))
self.check_triples(fields, objects)
fields = ["https://github.com/superphy#human"]
subjects = list(self.graph.subjects(object=n["stool"]))
self.check_triples(fields, subjects)
def test_IsolationSyndrome(self):
isolation_syndrome = classes.IsolationSyndrome(self.graph, "meningitis", "Meningitis", "human")
isolation_syndrome.rdf()
fields = ["https://github.com/superphy#isolation_attribute",
"https://github.com/superphy#Attribute",
"https://github.com/superphy#human",
"Meningitis",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#isolation_syndrome"]
objects = list(self.graph.objects(n["meningitis"]))
self.check_triples(fields, objects)
fields = ["https://github.com/superphy#human"]
subjects = list(self.graph.subjects(object=n["meningitis"]))
self.check_triples(fields, subjects)
def test_Serotype(self):
serotype = classes.Serotype(self.graph, "OUnknown")
serotype.rdf()
fields = ["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#serotype",
"https://github.com/superphy#Attribute"]
objects = list(self.graph.objects(n["OUnknown"]))
self.check_triples(fields, objects)
def test_Otype(self):
otype = classes.Otype(self.graph, 157)
otype.rdf()
fields =["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#serotype",
"https://github.com/superphy#Attribute",
"https://github.com/superphy#Otype",
"157"]
objects = list(self.graph.objects(n["O157"]))
self.check_triples(fields, objects)
def test_Htype(self):
htype = classes.Htype(self.graph, 7)
htype.rdf()
fields =["http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#serotype",
"https://github.com/superphy#Attribute",
"https://github.com/superphy#Htype",
"7"]
objects = list(self.graph.objects(n["H7"]))
self.check_triples(fields, objects)
@mock.patch("superphy.upload._sparql.find_syndrome")
@mock.patch("superphy.upload._sparql.find_source")
@mock.patch("superphy.upload._sparql.find_from_host")
def test_Genome(self, mock_host, mock_source, mock_syndrome):
kwargs = {"isolation_date": {"2013-06-24"},
"isolation_location": {"United States, California, Santa Clara"},
"accession": {"JNOG00000000"},
"bioproject": {"251898"},
"biosample": {"2841129"},
"strain": {"CS03"},
"organism": "ecoli",
"isolation_host": {"Homo sapiens (human)"},
"isolation_source": {"Feces"},
"syndrome": {"Urinary tract infection (cystitis)"},
"Htype": "-",
"Otype": None,
}
mock_host.return_value = "https://github.com/superphy#from_hsapiens"
mock_source.return_value = "https://github.com/superphy#feces"
mock_syndrome.return_value = "https://github.com/superphy#uti"
genome = classes.Genome(self.graph, "JNOG00000000", **kwargs)
genome.rdf()
self.assertEqual(len(mock_host.mock_calls), 1)
self.assertEqual(len(mock_source.mock_calls), 1)
self.assertEqual(len(mock_syndrome.mock_calls), 1)
field = {"https://github.com/superphy#H-",
"https://github.com/superphy#from_hsapiens",
"United States, California, Santa Clara",
"JNOG00000000",
"https://github.com/superphy#ecoli",
"https://github.com/superphy#feces",
"2841129",
"https://github.com/superphy#uti",
"251898",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"CS03",
"2013-06-24",
"http://www.biointerchange.org/gfvo#Genome"}
objects = list(self.graph.objects(n["JNOG00000000"]))
self.check_triples(field, objects)
field = {"https://github.com/superphy#uti",
"https://github.com/superphy#from_hsapiens",
"https://github.com/superphy#H-",
"https://github.com/superphy#feces",
"https://github.com/superphy#ecoli"}
subjects = list(self.graph.subjects(object=n["JNOG00000000"]))
self.check_triples(field, subjects)
@mock.patch("superphy.upload._sparql.find_syndrome")
@mock.patch("superphy.upload._sparql.find_source")
@mock.patch("superphy.upload._sparql.find_from_host")
def test_PendingGenome(self, mock_host, mock_source, mock_syndrome):
kwargs = {"isolation_date": {"2013-06-24"},
"isolation_location": {"United States, California, Santa Clara"},
"accession": {"JNOG00000000"},
"bioproject": {"251898"},
"biosample": {"2841129"},
"strain": {"CS03"},
"organism": "ecoli",
"isolation_host": {"Homo sapiens (human)"},
"isolation_source": {"Feces"},
"syndrome": {"Urinary tract infection (cystitis)"},
"Htype": "-",
"Otype": None,
}
mock_host.return_value = "https://github.com/superphy#from_hsapiens"
mock_source.return_value = "https://github.com/superphy#feces"
mock_syndrome.return_value = "https://github.com/superphy#uti"
pending_genome = classes.PendingGenome(self.graph, "JNOG00000000", **kwargs)
pending_genome.rdf()
self.assertEqual(len(mock_host.mock_calls), 1)
self.assertEqual(len(mock_source.mock_calls), 1)
self.assertEqual(len(mock_syndrome.mock_calls), 1)
field = {"https://github.com/superphy#H-",
"https://github.com/superphy#from_hsapiens",
"United States, California, Santa Clara",
"JNOG00000000",
"https://github.com/superphy#ecoli",
"https://github.com/superphy#feces",
"2841129",
"https://github.com/superphy#uti",
"251898",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"CS03",
"2013-06-24",
"http://www.biointerchange.org/gfvo#Genome",
"https://github.com/superphy#pending_genome"}
objects = list(self.graph.objects(n["JNOG00000000"]))
self.check_triples(field, objects)
field = {"https://github.com/superphy#uti",
"https://github.com/superphy#from_hsapiens",
"https://github.com/superphy#H-",
"https://github.com/superphy#feces",
"https://github.com/superphy#ecoli"}
subjects = list(self.graph.subjects(object=n["JNOG00000000"]))
self.check_triples(field, subjects)
@mock.patch("superphy.upload._sparql.find_syndrome")
@mock.patch("superphy.upload._sparql.find_source")
@mock.patch("superphy.upload._sparql.find_from_host")
def test_CompletedGenome(self, mock_host, mock_source, mock_syndrome):
kwargs = {"isolation_date": {"2013-06-24"},
"isolation_location": {"United States, California, Santa Clara"},
"accession": {"JNOG00000000"},
"bioproject": {"251898"},
"biosample": {"2841129"},
"strain": {"CS03"},
"organism": "ecoli",
"isolation_host": {"Homo sapiens (human)"},
"isolation_source": {"Feces"},
"syndrome": {"Urinary tract infection (cystitis)"},
"Htype": "-",
"Otype": None,
}
mock_host.return_value = "https://github.com/superphy#from_hsapiens"
mock_source.return_value = "https://github.com/superphy#feces"
mock_syndrome.return_value = "https://github.com/superphy#uti"
completed_genome = classes.CompletedGenome(self.graph, "JNOG00000000", **kwargs)
completed_genome.rdf()
self.assertEqual(len(mock_host.mock_calls), 1)
self.assertEqual(len(mock_source.mock_calls), 1)
self.assertEqual(len(mock_syndrome.mock_calls), 1)
field = {"https://github.com/superphy#H-",
"https://github.com/superphy#from_hsapiens",
"United States, California, Santa Clara",
"JNOG00000000",
"https://github.com/superphy#ecoli",
"https://github.com/superphy#feces",
"2841129",
"https://github.com/superphy#uti",
"251898",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"CS03",
"2013-06-24",
"http://www.biointerchange.org/gfvo#Genome",
"https://github.com/superphy#completed_genome"}
objects = list(self.graph.objects(n["JNOG00000000"]))
self.check_triples(field, objects)
field = {"https://github.com/superphy#uti",
"https://github.com/superphy#from_hsapiens",
"https://github.com/superphy#H-",
"https://github.com/superphy#feces",
"https://github.com/superphy#ecoli"}
subjects = list(self.graph.subjects(object=n["JNOG00000000"]))
self.check_triples(field, subjects)
@mock.patch("superphy.upload._sparql.check_validation")
def test_Contig(self, mock_validation):
# Testing general contig without sequence validation
contig = classes.Contig(self.graph, "ANVW01000001", "ANVW00000000", "ATGATGATGATAGAGATGAGAT", "WGS")
contig.rdf()
field = {"http://www.w3.org/2002/07/owl#NamedIndividual",
"http://www.biointerchange.org/gfvo#Contig",
"ATGATGATGATAGAGATGAGAT",
"WGS",
"https://github.com/superphy#ANVW00000000"
}
objects = list(self.graph.objects(n["ANVW01000001"]))
self.check_triples(field, objects)
# Testing a false validation with the genome validated (i.e. skips if case)
mock_validation.return_value = True
contig.add_seq_validation(False)
self.assertEqual(len(mock_validation.mock_calls), 1)
field = {"False"}
objects = list(self.graph.objects(predicate=n.has_valid_sequence))
self.check_triples(field, objects)
# Testing a true validation
mock_validation.reset_mock()
mock_validation.return_value = False
contig.add_seq_validation(True)
field = {"True"}
objects = list(self.graph.objects(subject=n["ANVW00000000"], predicate=n.has_valid_sequence))
for thing in objects:
print thing
self.check_triples(field, objects)
def test_Gene(self):
kwargs = {"category": {"Adherence"},
"subcategory": {"AAF_II_fimbriae"},
"gene_type": {"virulence_factor"},
"vfo_id": {"3000001"}
}
gene = classes.Gene(self.graph, "hlyA", **kwargs)
gene.rdf()
field = {"http://www.biointerchange.org/gfvo#gene",
"3000001",
"Adherence",
"AAF_II_fimbriae",
"http://www.w3.org/2002/07/owl#NamedIndividual",
"https://github.com/superphy#virulence_factor"
}
objects = list(self.graph.objects(n["hlyA"]))
print objects
self.check_triples(field, objects)
def test_GeneLocation(self):
# Testing a reference gene
genelocation = classes.GeneLocation(self.graph, "senB_ADUP01000050_0", "senB", "ADUP01000050", "146606", "147859",
"ATGGATATTTGGCGGGGACATTCGTTTCTGATGACAATTTCCGCT", True)
genelocation.rdf()
field = {"http://www.w3.org/2002/07/owl#NamedIndividual",
"http://biohackathon.org/resource/faldo#Region",
"https://github.com/superphy#ADUP01000050",
"https://github.com/superphy#reference_gene",
"https://github.com/superphy#senB_ADUP01000050_0_begin",
"https://github.com/superphy#senB_ADUP01000050_0_end",
"ATGGATATTTGGCGGGGACATTCGTTTCTGATGACAATTTCCGCT"
}
objects = list(self.graph.objects(n["senB_ADUP01000050_0"]))
self.check_triples(field, objects)
field = ["https://github.com/superphy#senB_ADUP01000050_0",
"http://www.biointerchange.org/gfvo#gene",
"https://github.com/superphy#senB_ADUP01000050_0"]
objects = list(self.graph.objects(n["senB"]))
self.check_triples(field, objects)
field = {"http://biohackathon.org/resource/faldo#Position",
"http://biohackathon.org/resource/faldo#ExactPosition",
"http://biohackathon.org/resource/faldo#ForwardStrandPosition",
"146606",
"https://github.com/superphy#ADUP01000050"
}
objects = list(self.graph.objects(n["senB_ADUP01000050_0_begin"]))
for thing in objects:
print thing
self.check_triples(field, objects)
if __name__ == '__main__':
unittest.main()
|
from django.contrib.auth.models import User
from django.test.client import Client
from survey.forms.aboutus_form import AboutUsForm
from survey.models import Survey, AboutUs, SuccessStories
from survey.tests.base_test import BaseTest
from django.core.urlresolvers import reverse
class HomepageViewTest(BaseTest):
def setUp(self):
self.client = Client()
User.objects.create_user(username='useless', email='demo3@kant.com',
password='I_Suck')
raj = self.assign_permission_to(
User.objects.create_user(
'demo3',
'demo3@kant.com',
'demo3'),
'can_view_users')
self.client.login(username='demo3', password='demo3')
def test_home_page(self):
response = self.client.get('/')
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('main/index.html', templates)
def test_no_content_available_on_about_page(self):
response = self.client.get(reverse('about_page'))
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('main/about.html', templates)
about_us = AboutUs.objects.all()[0]
self.assertEqual(about_us, response.context['about_content'])
self.assertEqual(about_us.content, 'No content available yet !!')
def test_about_page(self):
about_us_content = AboutUs.objects.create(content="blah blah")
response = self.client.get(reverse('about_page'))
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('main/about.html', templates)
self.assertEqual(about_us_content, response.context['about_content'])
def test_get_edit_about_page(self):
about_us_content = AboutUs.objects.create(content="blah blah")
response = self.client.get(reverse('edit_about_page'))
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('home/edit.html', templates)
self.assertEqual(about_us_content, response.context[
'about_form'].instance)
self.assertIsInstance(response.context['about_form'], AboutUsForm)
def test_post_edit_about_page(self):
about_us_content = AboutUs.objects.create(content="blah blah")
form_data = {'content': about_us_content.content +
"more blah blah blah"}
self.failIf(AboutUs.objects.filter(**form_data))
response = self.client.post(reverse('edit_about_page'), data=form_data)
self.assertRedirects(response, reverse('about_page'))
self.failUnless(AboutUs.objects.filter(**form_data))
message = "About us content successfully updated"
self.assertIn(message, response.cookies['messages'].value)
def test_restricted_permssion(self):
AboutUs.objects.create(content="blah blah")
self.assert_restricted_permission_for(reverse('edit_about_page'))
def test_success_story_page(self):
ss_content = SuccessStories.objects.create(name='abc',content="blah blah",image='1.jpg')
response = self.client.get(reverse('home_success_story_list'))
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('main/home_success_story_list.html', templates)
self.assertIn(ss_content, response.context['ss_list'])
home views chages
from django.contrib.auth.models import User, Group
from django.test.client import Client
from survey.forms.aboutus_form import AboutUsForm
from survey.models import *
from model_mommy import mommy
from survey.tests.base_test import BaseTest
from django.core.urlresolvers import reverse
class HomepageViewTest(BaseTest):
def setUp(self):
self.client = Client()
User.objects.create_user(username='useless', email='demo3@kant.com',
password='I_Suck')
raj = self.assign_permission_to(
User.objects.create_user(
'demo3',
'demo3@kant.com',
'demo3'),
'can_view_users')
self.assign_permission_to(raj, 'can_have_super_powers')
self.client.login(username='demo3', password='demo3')
def test_home_page(self):
survey_obj = mommy.make(Survey)
url = reverse('main_page')
url = url+'?survey%s'%survey_obj.id
response = self.client.get(url)
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('main/index.html', templates)
def test_no_content_available_on_about_page(self):
response = self.client.get(reverse('about_page'))
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('main/about.html', templates)
about_us = AboutUs.objects.all()[0]
self.assertEqual(about_us, response.context['about_content'])
self.assertEqual(about_us.content, 'No content available yet !!')
def test_about_page(self):
about_us_content = AboutUs.objects.create(content="blah blah")
response = self.client.get(reverse('about_page'))
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('main/about.html', templates)
self.assertEqual(about_us_content, response.context['about_content'])
def test_get_edit_about_page(self):
about_us_content = AboutUs.objects.create(content="blah blah")
response = self.client.get(reverse('edit_about_page'))
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('home/edit.html', templates)
self.assertEqual(about_us_content, response.context[
'about_form'].instance)
self.assertIsInstance(response.context['about_form'], AboutUsForm)
def test_post_edit_about_page(self):
about_us_content = AboutUs.objects.create(content="blah blah")
form_data = {'content': about_us_content.content +
"more blah blah blah"}
self.failIf(AboutUs.objects.filter(**form_data))
response = self.client.post(reverse('edit_about_page'), data=form_data)
self.assertRedirects(response, reverse('about_page'))
self.failUnless(AboutUs.objects.filter(**form_data))
message = "About us content successfully updated"
self.assertIn(message, response.cookies['messages'].value)
def test_restricted_permssion(self):
AboutUs.objects.create(content="blah blah")
self.assert_restricted_permission_for(reverse('edit_about_page'))
def test_success_story_page(self):
ss_content = SuccessStories.objects.create(name='abc',content="blah blah",image='1.jpg')
response = self.client.get(reverse('home_success_story_list'))
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('main/home_success_story_list.html', templates)
self.assertIn(ss_content, response.context['ss_list'])
def test_activate_super_powers(self):
some_group = Group.objects.create(name='Administrator')
form_data = {
'username': 'knight111hpppl',
'password': 'mk',
'first_name': 'demo',
'last_name': 'knight',
'mobile_number': '123456789',
'email': 'mm@mm.mm',
'groups': some_group.id,
}
user = User.objects.create(
username=form_data['username'],
email=form_data['email'],
password=form_data['password'],
first_name=form_data['first_name'],
last_name=form_data['last_name'])
UserProfile.objects.create(
user=user, mobile_number=form_data['mobile_number'])
url = reverse('activate_super_powers_page')
response = self.client.get(url) |
# coding: utf8
#
# urlfetch
# ~~~~~~~~
#
# An easy to use HTTP client based on httplib.
#
# :copyright: (c) 2011 Elyes Du.
# :license: BSD, see LICENSE for more details.
#
__version__ = '0.3.6'
__author__ = 'Elyes Du <lyxint@gmail.com>'
__url__ = 'https://github.com/lyxint/urlfetch'
from . import util
from . import uas
if util.py3k:
from http.client import HTTPConnection, HTTPSConnection, HTTPException
from http.client import HTTP_PORT, HTTPS_PORT
from urllib.parse import urlencode, quote as urlquote, quote_plus as urlquote_plus
import urllib.parse as urlparse
import http.cookies as Cookie
basestring = (str, bytes)
def b(s):
return s.encode('latin-1')
def u(s):
return s
else:
from httplib import HTTPConnection, HTTPSConnection, HTTPException
from httplib import HTTP_PORT, HTTPS_PORT
from urllib import urlencode, quote as urlquote, quote_plus as urlquote_plus
import urlparse
import Cookie
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
import socket
import base64
from functools import partial
import os
from io import BytesIO
import codecs
writer = codecs.lookup('utf-8')[3]
__all__ = [
'sc2cs', 'fetch', 'request',
'get', 'head', 'put', 'post', 'delete', 'options',
'Headers', 'UrlfetchException',
]
_allowed_methods = ("GET", "DELETE", "HEAD", "OPTIONS", "PUT", "POST", "TRACE", "PATCH")
class UrlfetchException(Exception): pass
def sc2cs(sc):
'''convert response.getheader('set-cookie') to cookie string
Args:
sc (str): The Set-Cookie string
you can get it from::
>>> sc = response.getheader('Set-Cookie')
Returns:
str. cookie string, name=value pairs, joined by `'; '`
'''
c = Cookie.SimpleCookie(sc)
sc = ['%s=%s' % (i.key, i.value) for i in c.itervalues()]
return '; '.join(sc)
_boundary_prefix = None
def choose_boundary():
global _boundary_prefix
if _boundary_prefix is None:
_boundary_prefix = "urlfetch"
import os
try:
uid = repr(os.getuid())
_boundary_prefix += "." + uid
except AttributeError: pass
try:
pid = repr(os.getpid())
_boundary_prefix += "." + pid
except AttributeError: pass
import uuid
return "(*^__^*)%s.%s" % (_boundary_prefix, uuid.uuid4().hex)
def _encode_multipart(data, files):
body = BytesIO()
boundary = choose_boundary()
part_boundary = b('--%s\r\n' % boundary)
if isinstance(data, dict):
for name, value in data.items():
body.write(part_boundary)
writer(body).write('Content-Disposition: form-data; name="%s"\r\n' % name)
body.write(b'Content-Type: text/plain\r\n\r\n')
if util.py3k and isinstance(value, str):
writer(body).write(value)
else:
body.write(value)
body.write(b'\r\n')
for fieldname, f in files.items():
if isinstance(f, tuple):
filename, f = f
elif hasattr(f, 'name'):
filename = os.path.basename(f.name)
else:
filename = None
raise UrlfetchException("file must has filename")
if hasattr(f, 'read'):
value = f.read()
elif isinstance(f, basestring):
value = f
else:
value = str(f)
body.write(part_boundary)
if filename:
writer(body).write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (fieldname, filename))
body.write(b'Content-Type: application/octet-stream\r\n\r\n')
else:
writer(body).write('Content-Disposition: form-data; name="%s"\r\n' % name)
body.write(b'Content-Type: text/plain\r\n\r\n')
if util.py3k and isinstance(value, str):
writer(body).write(value)
else:
body.write(value)
body.write(b'\r\n')
body.write(b('--' + boundary + '--\r\n'))
content_type = 'multipart/form-data; boundary=%s' % boundary
#body.write(b(content_type))
return content_type, body.getvalue()
class Headers(object):
''' Headers
to simplify fetch() interface, class Headers helps to manipulate parameters
'''
def __init__(self):
''' make default headers '''
self.__headers = {
'Accept': '*/*',
'User-Agent': 'urlfetch/' + __version__,
}
def random_user_agent(self):
''' generate random User-Agent string from uas.py collection '''
self.__headers['User-Agent'] = uas.randua()
def auth(self, username, password):
''' add username/password for basic authentication '''
auth = '%s:%s' % (username, password)
auth = base64.b64encode(auth.encode('utf-8'))
self.__headers['Authorization'] = 'Basic ' + auth.decode('utf-8')
def items(self):
''' return headers dictionary '''
return self.__headers
class Response(object):
def __init__(self, r, **kwargs):
self._r = r
self.msg = r.msg
self.status = r.status
self.length = r.length
self.reason = r.reason
self.version = r.version
self._body = None
self._headers = None
self._text = None
self.getheader = r.getheader
self.getheaders = r.getheaders
for k in kwargs:
setattr(self, k, kwargs[k])
if kwargs.get('prefetch', False):
self._body = self._r.read()
self.close()
@classmethod
def from_httplib(cls, r, **kwargs):
return cls(r, **kwargs)
@property
def body(self):
if self._body is None:
self._body = self._r.read()
return self._body
@property
def text(self):
if self._text is None:
self._text = util.mb_code(self.body)
return self._text
@property
def headers(self):
if self._headers is None:
self._headers = dict((k.lower(), v) for k, v in self._r.getheaders())
return self._headers
def close(self):
if hasattr(self, 'connection'):
self.connection.close()
self._r.close()
def __del__(self):
self.close()
def fetch(url, data=None, headers={}, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
files={}, prefetch=True):
''' fetch url
Args:
url (str): url to fetch
Kwargs:
data (dict/str): The post data, it can be dict or string
headers (dict): The request headers
timeout (double): The timeout
files (dict): key is field name, value is (filename, fileobj) OR simply fileobj.
fileobj can be a file descriptor open for read or simply string
prefetch (bool): True for prefetching response body
Returns:
response object
.. note::
Default headers: {'Accept': '\*/\*'}
'''
local = locals()
if data is not None and isinstance(data, (basestring, dict)):
return post(**local)
return get(**local)
def request(url, method="GET", data=None, headers={}, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
files={}, prefetch=True):
''' request a url
Args:
url (str): url to fetch
Kwargs:
method (str): The request method, 'GET', 'POST', 'HEAD', 'PUT' OR 'DELETE'
data (dict/str): The data, it can be dict or string, used for POST or PUT requests
headers (dict): The request headers
timeout (double): The timeout
files (dict): key is field name, value is (filename, fileobj) OR simply fileobj.
fileobj can be a file descriptor open for read or simply string
prefetch (bool): True for prefetching response body
Returns:
response object
.. note::
Default headers: {'Accept': '\*/\*'}
'''
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
method = method.upper()
if method not in _allowed_methods:
raise UrlfetchException("Method shoud be one of " + ", ".join(_allowed_methods))
requrl = path
if query: requrl += '?' + query
# do not add fragment
#if fragment: requrl += '#' + fragment
# handle 'Host'
if ':' in netloc:
host, port = netloc.rsplit(':', 1)
port = int(port)
else:
host, port = netloc, None
host = host.encode('idna').decode('utf-8')
if scheme == 'https':
h = HTTPSConnection(host, port=port, timeout=timeout)
elif scheme == 'http':
h = HTTPConnection(host, port=port, timeout=timeout)
else:
raise UrlfetchException('Unsupported protocol %s' % scheme)
# default request headers
reqheaders = Headers().items()
if files:
content_type, data = _encode_multipart(data, files)
reqheaders['Content-Type'] = content_type
elif isinstance(data, dict):
data = urlencode(data, 1)
if isinstance(data, basestring) and not files:
# httplib will set 'Content-Length', also you can set it by yourself
reqheaders["Content-Type"] = "application/x-www-form-urlencoded"
# what if the method is GET, HEAD or DELETE
# just do not make so much decisions for users
for k, v in headers.items():
reqheaders[k.title()] = v
h.request(method, requrl, data, reqheaders)
response = h.getresponse()
return Response.from_httplib(response, prefetch=prefetch, reqheaders=reqheaders, connection=h)
# some shortcuts
get = partial(request, method="GET")
post = partial(request, method="POST")
put = partial(request, method="PUT")
delete = partial(request, method="DELETE")
head = partial(request, method="HEAD")
options = partial(request, method="OPTIONS")
# No entity body can be sent with a TRACE request.
trace = partial(request, method="TRACE", files={}, data=None)
patch = partial(request, method="PATCH")
added some comments
# coding: utf8
#
# urlfetch
# ~~~~~~~~
#
# An easy to use HTTP client based on httplib.
#
# :copyright: (c) 2011 Elyes Du.
# :license: BSD, see LICENSE for more details.
#
__version__ = '0.3.6'
__author__ = 'Elyes Du <lyxint@gmail.com>'
__url__ = 'https://github.com/lyxint/urlfetch'
from . import util
from . import uas
if util.py3k:
from http.client import HTTPConnection, HTTPSConnection, HTTPException
from http.client import HTTP_PORT, HTTPS_PORT
from urllib.parse import urlencode, quote as urlquote, quote_plus as urlquote_plus
import urllib.parse as urlparse
import http.cookies as Cookie
basestring = (str, bytes)
def b(s):
return s.encode('latin-1')
def u(s):
return s
else:
from httplib import HTTPConnection, HTTPSConnection, HTTPException
from httplib import HTTP_PORT, HTTPS_PORT
from urllib import urlencode, quote as urlquote, quote_plus as urlquote_plus
import urlparse
import Cookie
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
import socket
import base64
from functools import partial
import os
from io import BytesIO
import codecs
writer = codecs.lookup('utf-8')[3]
__all__ = [
'sc2cs', 'fetch', 'request',
'get', 'head', 'put', 'post', 'delete', 'options',
'Headers', 'UrlfetchException',
]
_allowed_methods = ("GET", "DELETE", "HEAD", "OPTIONS", "PUT", "POST", "TRACE", "PATCH")
class UrlfetchException(Exception): pass
def sc2cs(sc):
'''convert response.getheader('set-cookie') to cookie string
Args:
sc (str): The Set-Cookie string
you can get it from::
>>> sc = response.getheader('Set-Cookie')
Returns:
str. cookie string, name=value pairs, joined by `'; '`
'''
c = Cookie.SimpleCookie(sc)
sc = ['%s=%s' % (i.key, i.value) for i in c.itervalues()]
return '; '.join(sc)
_boundary_prefix = None
def choose_boundary():
global _boundary_prefix
if _boundary_prefix is None:
_boundary_prefix = "urlfetch"
import os
try:
uid = repr(os.getuid())
_boundary_prefix += "." + uid
except AttributeError: pass
try:
pid = repr(os.getpid())
_boundary_prefix += "." + pid
except AttributeError: pass
import uuid
return "(*^__^*)%s.%s" % (_boundary_prefix, uuid.uuid4().hex)
def _encode_multipart(data, files):
body = BytesIO()
boundary = choose_boundary()
part_boundary = b('--%s\r\n' % boundary)
if isinstance(data, dict):
for name, value in data.items():
body.write(part_boundary)
writer(body).write('Content-Disposition: form-data; name="%s"\r\n' % name)
body.write(b'Content-Type: text/plain\r\n\r\n')
if util.py3k and isinstance(value, str):
writer(body).write(value)
else:
body.write(value)
body.write(b'\r\n')
for fieldname, f in files.items():
if isinstance(f, tuple):
filename, f = f
elif hasattr(f, 'name'):
filename = os.path.basename(f.name)
else:
filename = None
raise UrlfetchException("file must has filename")
if hasattr(f, 'read'):
value = f.read()
elif isinstance(f, basestring):
value = f
else:
value = str(f)
body.write(part_boundary)
if filename:
writer(body).write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (fieldname, filename))
body.write(b'Content-Type: application/octet-stream\r\n\r\n')
else:
writer(body).write('Content-Disposition: form-data; name="%s"\r\n' % name)
body.write(b'Content-Type: text/plain\r\n\r\n')
if util.py3k and isinstance(value, str):
writer(body).write(value)
else:
body.write(value)
body.write(b'\r\n')
body.write(b('--' + boundary + '--\r\n'))
content_type = 'multipart/form-data; boundary=%s' % boundary
#body.write(b(content_type))
return content_type, body.getvalue()
class Headers(object):
''' Headers
to simplify fetch() interface, class Headers helps to manipulate parameters
'''
def __init__(self):
''' make default headers '''
self.__headers = {
'Accept': '*/*',
'User-Agent': 'urlfetch/' + __version__,
}
def random_user_agent(self):
''' generate random User-Agent string from uas.py collection '''
self.__headers['User-Agent'] = uas.randua()
def auth(self, username, password):
''' add username/password for basic authentication '''
auth = '%s:%s' % (username, password)
auth = base64.b64encode(auth.encode('utf-8'))
self.__headers['Authorization'] = 'Basic ' + auth.decode('utf-8')
def items(self):
''' return headers dictionary '''
return self.__headers
class Response(object):
def __init__(self, r, **kwargs):
self._r = r # httplib.HTTPResponse
self.msg = r.msg
self.status = r.status
self.length = r.length
self.reason = r.reason
self.version = r.version
self._body = None
self._headers = None
self._text = None
self.getheader = r.getheader
self.getheaders = r.getheaders
for k in kwargs:
setattr(self, k, kwargs[k])
if kwargs.get('prefetch', False):
self._body = self._r.read()
self.close()
@classmethod
def from_httplib(cls, r, **kwargs):
return cls(r, **kwargs)
@property
def body(self):
if self._body is None:
self._body = self._r.read()
return self._body
@property
def text(self):
if self._text is None:
self._text = util.mb_code(self.body)
return self._text
@property
def headers(self):
if self._headers is None:
self._headers = dict((k.lower(), v) for k, v in self._r.getheaders())
return self._headers
def close(self):
if hasattr(self, 'connection'):
self.connection.close()
self._r.close()
def __del__(self):
self.close()
def fetch(url, data=None, headers={}, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
files={}, prefetch=True):
''' fetch url
Args:
url (str): url to fetch
Kwargs:
data (dict/str): The post data, it can be dict or string
headers (dict): The request headers
timeout (double): The timeout
files (dict): key is field name, value is (filename, fileobj) OR simply fileobj.
fileobj can be a file descriptor open for read or simply string
prefetch (bool): True for prefetching response body
Returns:
response object
.. note::
Default headers: {'Accept': '\*/\*'}
'''
local = locals()
if data is not None and isinstance(data, (basestring, dict)):
return post(**local)
return get(**local)
def request(url, method="GET", data=None, headers={}, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
files={}, prefetch=True):
''' request a url
Args:
url (str): url to fetch
Kwargs:
method (str): The request method, 'GET', 'POST', 'HEAD', 'PUT' OR 'DELETE'
data (dict/str): The data, it can be dict or string, used for POST or PUT requests
headers (dict): The request headers
timeout (double): The timeout
files (dict): key is field name, value is (filename, fileobj) OR simply fileobj.
fileobj can be a file descriptor open for read or simply string
prefetch (bool): True for prefetching response body
Returns:
response object
.. note::
Default headers: {'Accept': '\*/\*'}
'''
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
method = method.upper()
if method not in _allowed_methods:
raise UrlfetchException("Method shoud be one of " + ", ".join(_allowed_methods))
requrl = path
if query: requrl += '?' + query
# do not add fragment
#if fragment: requrl += '#' + fragment
# handle 'Host'
if ':' in netloc:
host, port = netloc.rsplit(':', 1)
port = int(port)
else:
host, port = netloc, None
host = host.encode('idna').decode('utf-8')
if scheme == 'https':
h = HTTPSConnection(host, port=port, timeout=timeout)
elif scheme == 'http':
h = HTTPConnection(host, port=port, timeout=timeout)
else:
raise UrlfetchException('Unsupported protocol %s' % scheme)
# default request headers
reqheaders = Headers().items()
if files:
content_type, data = _encode_multipart(data, files)
reqheaders['Content-Type'] = content_type
elif isinstance(data, dict):
data = urlencode(data, 1)
if isinstance(data, basestring) and not files:
# httplib will set 'Content-Length', also you can set it by yourself
reqheaders["Content-Type"] = "application/x-www-form-urlencoded"
# what if the method is GET, HEAD or DELETE
# just do not make so much decisions for users
for k, v in headers.items():
reqheaders[k.title()] = v
h.request(method, requrl, data, reqheaders)
response = h.getresponse()
return Response.from_httplib(response, prefetch=prefetch, reqheaders=reqheaders, connection=h)
# some shortcuts
get = partial(request, method="GET")
post = partial(request, method="POST")
put = partial(request, method="PUT")
delete = partial(request, method="DELETE")
head = partial(request, method="HEAD")
options = partial(request, method="OPTIONS")
# No entity body can be sent with a TRACE request.
trace = partial(request, method="TRACE", files={}, data=None)
patch = partial(request, method="PATCH")
|
#!/usr/bin/env python
import sys
import os
import optparse
from glue.qt.glue_application import GlueApplication
def parse(argv):
""" Parse argument list, check validity
:param argv: Arguments passed to program
*Returns*
A tuple of options, position arguments
"""
usage = "usage: %prog [options] [FILE]"
parser = optparse.OptionParser(usage=usage)
parser.add_option('-x', '--execute', action='store_true', dest='script',
help="Execute FILE as a python script", default=False)
parser.add_option('-g', action='store_true', dest='restore',
help="Restore glue session from FILE", default=False)
parser.add_option('-c', '--config', type='string', dest='config',
metavar='CONFIG',
help='use CONFIG as configuration file')
err_msg = verify(parser, argv)
if err_msg:
sys.stderr.write('\n%s\n' % err_msg)
parser.print_help()
sys.exit(1)
return parser.parse_args(argv)
def verify(parser, argv):
""" Check for input errors
:param parser: OptionParser instance
:param argv: Argument list
:type argv: List of strings
*Returns*
An error message, or None
"""
opts, args = parser.parse_args(argv)
err_msg = None
if len(args) > 1:
err_msg = "Too many arguments"
elif opts.script and len(args) != 1:
err_msg = "Must provide a script\n"
elif opts.restore and len(args) != 1:
err_msg = "Must provide a .glu file\n"
elif opts.config is not None and not os.path.exists(opts.config):
err_msg = "Could not find configuration file: %s" % opts.config
return err_msg
def die_on_error(msg):
"""Decorator that catches errors, displays a popup message, and quits"""
def decorator(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
import traceback
from PyQt4.QtGui import QMessageBox
m = "%s\n%s" % (msg, e)
detail = str(traceback.format_exc())
qmb = QMessageBox(QMessageBox.Critical, "Error", m)
qmb.setDetailedText(detail)
qmb.exec_()
sys.exit(1)
return wrapper
return decorator
@die_on_error("Error restoring Glue session")
def restore_session(gluefile):
"""Load a .glu file and return a DataCollection, Hub tuple"""
from pickle import Unpickler
with open(gluefile) as f:
state = Unpickler(f).load()
return state
@die_on_error("Error reading data file")
def load_data_files(datafiles):
"""Load data files and return a DataCollection"""
import glue
from glue.core.data_factories import auto_data, load_data
dc = glue.core.DataCollection()
for df in datafiles:
dc.append(load_data(df, auto_data))
return dc
def start_glue(gluefile=None, config=None, datafiles=None):
"""Run a glue session and exit
:param gluefile: An optional .glu file to restore
:type gluefile: str
:param config: An optional configuration file to use
:type config: str
:param datafiles: An optional list of data files to load
:type datafiles: list of str
"""
import glue
datafiles = datafiles or []
data, hub = None, None
if gluefile is not None:
data, hub = restore_session(gluefile)
if config is not None:
glue.env = glue.config.load_configuration(search_path=[config])
if datafiles:
data = load_data_files(datafiles)
hub = hub or glue.core.Hub(data)
ga = GlueApplication(data_collection=data, hub=hub)
sys.exit(ga.exec_())
def execute_script(script):
""" Run a python script and exit.
Provides a way for people with pre-installed binaries to use
the glue library
"""
execfile(script)
sys.exit(0)
def main():
opt, args = parse(sys.argv[1:])
if opt.restore:
start_glue(args[0], config=opt.config)
elif opt.script:
execute_script(args[0])
else:
has_file = len(args) == 1
has_py = has_file and args[0].endswith('.py')
has_glu = has_file and args[0].endswith('.glu')
if has_py:
execute_script(args[0])
elif has_glu:
start_glue(args[0], config=opt.config)
elif has_file:
start_glue(datafiles=[args[0]])
else:
start_glue()
if __name__ == "__main__":
main()
command line prints version number. #95
#!/usr/bin/env python
import sys
import os
import optparse
from glue.qt.glue_application import GlueApplication
from glue import __version__
def parse(argv):
""" Parse argument list, check validity
:param argv: Arguments passed to program
*Returns*
A tuple of options, position arguments
"""
usage = "usage: %prog [options] [FILE]"
parser = optparse.OptionParser(usage=usage,
version="%s" % __version__)
parser.add_option('-x', '--execute', action='store_true', dest='script',
help="Execute FILE as a python script", default=False)
parser.add_option('-g', action='store_true', dest='restore',
help="Restore glue session from FILE", default=False)
parser.add_option('-c', '--config', type='string', dest='config',
metavar='CONFIG',
help='use CONFIG as configuration file')
err_msg = verify(parser, argv)
if err_msg:
sys.stderr.write('\n%s\n' % err_msg)
parser.print_help()
sys.exit(1)
return parser.parse_args(argv)
def verify(parser, argv):
""" Check for input errors
:param parser: OptionParser instance
:param argv: Argument list
:type argv: List of strings
*Returns*
An error message, or None
"""
opts, args = parser.parse_args(argv)
err_msg = None
if len(args) > 1:
err_msg = "Too many arguments"
elif opts.script and len(args) != 1:
err_msg = "Must provide a script\n"
elif opts.restore and len(args) != 1:
err_msg = "Must provide a .glu file\n"
elif opts.config is not None and not os.path.exists(opts.config):
err_msg = "Could not find configuration file: %s" % opts.config
return err_msg
def die_on_error(msg):
"""Decorator that catches errors, displays a popup message, and quits"""
def decorator(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
import traceback
from PyQt4.QtGui import QMessageBox
m = "%s\n%s" % (msg, e)
detail = str(traceback.format_exc())
qmb = QMessageBox(QMessageBox.Critical, "Error", m)
qmb.setDetailedText(detail)
qmb.exec_()
sys.exit(1)
return wrapper
return decorator
@die_on_error("Error restoring Glue session")
def restore_session(gluefile):
"""Load a .glu file and return a DataCollection, Hub tuple"""
from pickle import Unpickler
with open(gluefile) as f:
state = Unpickler(f).load()
return state
@die_on_error("Error reading data file")
def load_data_files(datafiles):
"""Load data files and return a DataCollection"""
import glue
from glue.core.data_factories import auto_data, load_data
dc = glue.core.DataCollection()
for df in datafiles:
dc.append(load_data(df, auto_data))
return dc
def start_glue(gluefile=None, config=None, datafiles=None):
"""Run a glue session and exit
:param gluefile: An optional .glu file to restore
:type gluefile: str
:param config: An optional configuration file to use
:type config: str
:param datafiles: An optional list of data files to load
:type datafiles: list of str
"""
import glue
datafiles = datafiles or []
data, hub = None, None
if gluefile is not None:
data, hub = restore_session(gluefile)
if config is not None:
glue.env = glue.config.load_configuration(search_path=[config])
if datafiles:
data = load_data_files(datafiles)
hub = hub or glue.core.Hub(data)
ga = GlueApplication(data_collection=data, hub=hub)
sys.exit(ga.exec_())
def execute_script(script):
""" Run a python script and exit.
Provides a way for people with pre-installed binaries to use
the glue library
"""
execfile(script)
sys.exit(0)
def main():
opt, args = parse(sys.argv[1:])
if opt.restore:
start_glue(args[0], config=opt.config)
elif opt.script:
execute_script(args[0])
else:
has_file = len(args) == 1
has_py = has_file and args[0].endswith('.py')
has_glu = has_file and args[0].endswith('.glu')
if has_py:
execute_script(args[0])
elif has_glu:
start_glue(args[0], config=opt.config)
elif has_file:
start_glue(datafiles=[args[0]])
else:
start_glue()
if __name__ == "__main__":
main()
|
#
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 11-09-2017 11:14
# ---
# © 2017 Manuel Bernal Llinares <mbdebian@gmail.com>
# All rights reserved.
#
"""
Models representing different HPC environments
"""
class HpcServiceFactory:
_HPC_TYPE_LSF = 'lsf'
get_hpc_service factory method, placeholder
#
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 11-09-2017 11:14
# ---
# © 2017 Manuel Bernal Llinares <mbdebian@gmail.com>
# All rights reserved.
#
"""
Models representing different HPC environments
"""
class HpcServiceFactory:
_HPC_TYPE_LSF = 'lsf'
@staticmethod
def get_hpc_service():
pass |
# coding: utf-8
from django.shortcuts import render
from django.shortcuts import render
from django.utils.translation import ugettext_lazy
#def _(x): return unicode(ugettext_lazy(x))
def _(x): return unicode(x)
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.gzip import gzip_page
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.http import require_http_methods
from django.utils import timezone
from datetime import timedelta
import json
from hpqw.models import Connection
from hpqr.settings import bot
from hpqr.settings import HPQR_HOST
from hpqr.settings import HPQR_YANDEX_METRIKA
import hpqw.bot_brain as brain
# Create your views here.
@csrf_exempt
@require_http_methods(["POST"])
def telegram_hook(request):
try:
brain.read_msg(json.loads(request.body)['message'], bot, HPQR_HOST)
finally:
pass
return HttpResponse('hook')
@gzip_page
def index(request):
return render(request, 'index.html', {'bot_getMe':bot.getMe(),
'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
def robots(request):
return render(request, 'robots.txt')
@gzip_page
def register(request):
return render(request, 'register.html',
{'bot_help_text':brain.help_text,
'bot_getMe':bot.getMe(),
'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
@gzip_page
def credits(request):
return render(request, 'credits.html', {'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
@gzip_page
def contact(request):
return render(request, 'contact.html', {'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
def check_inputs(id, pin):
try:
obj = Connection.objects.get( id = int(id) )
except ObjectDoesNotExist:
#return HttpResponse("DoesNotExist id " + id)
raise Http404(_("ID does not exist"))
if int(pin) != obj.pin:
#return HttpResponse("Wrong pin " + pin)
raise Http404(_("Wrong pin"))
def print_page(request, id, pin):
check_inputs(id, pin)
message_link = HPQR_HOST +"/" + id + "." + pin
return render(request, 'print.html', {'message_link':message_link, 'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
@gzip_page
def connection(request, id, pin):
check_inputs(id, pin)
con = Connection.objects.get(id=id)
if timezone.now() > con.wait_till: # This is small spam protection
con.message = ""
con.wait_till = timezone.now() + timedelta(minutes = 1)
con.save()
car_id = u""
if con.car_id != "":
car_id = u" [%s]." % con.car_id
specific = u" id=" + unicode(str(con.id)) + car_id
show_keyboard = {'keyboard': [['1 minute'+specific,'2 minute'+specific], ['5 minute'+specific,'60 minute'+specific + ' (block spam)']]}
bot.sendMessage(con.telegram_id,
_("Кто-то ожидает вас у машины " ) + specific + _(". Когда вы подойдёте?") ,
reply_markup=show_keyboard)
reply_message = con.message
reply_time = (con.wait_till - timezone.now()).seconds
#return HttpResponse("Good!: " + id + " -> " + pin)
return render(request, 'connection.html',
{'id':id, 'pin':pin, 'reply_message':reply_message, 'reply_time':reply_time, 'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
unicode removed from connect view
# coding: utf-8
from django.shortcuts import render
from django.shortcuts import render
from django.utils.translation import ugettext_lazy
#def _(x): return unicode(ugettext_lazy(x))
def _(x): return unicode(x)
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.gzip import gzip_page
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.http import require_http_methods
from django.utils import timezone
from datetime import timedelta
import json
from hpqw.models import Connection
from hpqr.settings import bot
from hpqr.settings import HPQR_HOST
from hpqr.settings import HPQR_YANDEX_METRIKA
import hpqw.bot_brain as brain
# Create your views here.
@csrf_exempt
@require_http_methods(["POST"])
def telegram_hook(request):
try:
brain.read_msg(json.loads(request.body)['message'], bot, HPQR_HOST)
finally:
pass
return HttpResponse('hook')
@gzip_page
def index(request):
return render(request, 'index.html', {'bot_getMe':bot.getMe(),
'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
def robots(request):
return render(request, 'robots.txt')
@gzip_page
def register(request):
return render(request, 'register.html',
{'bot_help_text':brain.help_text,
'bot_getMe':bot.getMe(),
'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
@gzip_page
def credits(request):
return render(request, 'credits.html', {'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
@gzip_page
def contact(request):
return render(request, 'contact.html', {'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
def check_inputs(id, pin):
try:
obj = Connection.objects.get( id = int(id) )
except ObjectDoesNotExist:
#return HttpResponse("DoesNotExist id " + id)
raise Http404(_("ID does not exist"))
if int(pin) != obj.pin:
#return HttpResponse("Wrong pin " + pin)
raise Http404(_("Wrong pin"))
def print_page(request, id, pin):
check_inputs(id, pin)
message_link = HPQR_HOST +"/" + id + "." + pin
return render(request, 'print.html', {'message_link':message_link, 'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
@gzip_page
def connection(request, id, pin):
check_inputs(id, pin)
con = Connection.objects.get(id=id)
if timezone.now() > con.wait_till: # This is small spam protection
con.message = ""
con.wait_till = timezone.now() + timedelta(minutes = 1)
con.save()
car_id = ""
if con.car_id != "":
car_id = " [%s]." % con.car_id
specific = " id=" + str(con.id) + car_id
show_keyboard = {'keyboard': [[u'1 minute'+specific,u'2 minute'+specific], [u'5 minute'+specific,u'60 minute'+specific + u' (block spam)']]}
bot.sendMessage(con.telegram_id,
"Кто-то ожидает вас у машины " + specific + ". Когда вы подойдёте?" ,
reply_markup=show_keyboard)
reply_message = con.message
reply_time = (con.wait_till - timezone.now()).seconds
#return HttpResponse("Good!: " + id + " -> " + pin)
return render(request, 'connection.html',
{'id':id, 'pin':pin, 'reply_message':reply_message, 'reply_time':reply_time, 'HPQR_YANDEX_METRIKA' : HPQR_YANDEX_METRIKA})
|
from datetime import datetime, timedelta, time
from flask import current_app
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy import func, case, desc, Date, Integer
from app import db
from app.dao.date_util import get_financial_year
from app.models import (
FactBilling,
Notification,
Service,
KEY_TYPE_TEST,
LETTER_TYPE,
SMS_TYPE,
Rate,
LetterRate,
NOTIFICATION_STATUS_TYPES_BILLABLE,
NotificationHistory,
EMAIL_TYPE
)
from app.utils import convert_utc_to_bst, convert_bst_to_utc
def fetch_billing_totals_for_year(service_id, year):
year_start_date, year_end_date = get_financial_year(year)
"""
Billing for email: only record the total number of emails.
Billing for letters: The billing units is used to fetch the correct rate for the sheet count of the letter.
Total cost is notifications_sent * rate.
Rate multiplier does not apply to email or letters.
"""
email_and_letters = db.session.query(
func.sum(FactBilling.notifications_sent).label("notifications_sent"),
func.sum(FactBilling.notifications_sent).label("billable_units"),
FactBilling.rate.label('rate'),
FactBilling.notification_type.label('notification_type')
).filter(
FactBilling.service_id == service_id,
FactBilling.bst_date >= year_start_date,
FactBilling.bst_date <= year_end_date,
FactBilling.notification_type.in_([EMAIL_TYPE, LETTER_TYPE])
).group_by(
FactBilling.rate,
FactBilling.notification_type
)
"""
Billing for SMS using the billing_units * rate_multiplier. Billing unit of SMS is the fragment count of a message
"""
sms = db.session.query(
func.sum(FactBilling.notifications_sent).label("notifications_sent"),
func.sum(FactBilling.billable_units * FactBilling.rate_multiplier).label("billable_units"),
FactBilling.rate,
FactBilling.notification_type
).filter(
FactBilling.service_id == service_id,
FactBilling.bst_date >= year_start_date,
FactBilling.bst_date <= year_end_date,
FactBilling.notification_type == SMS_TYPE
).group_by(
FactBilling.rate,
FactBilling.notification_type
)
yearly_data = email_and_letters.union_all(sms).order_by(
'notification_type',
'rate'
).all()
return yearly_data
def fetch_monthly_billing_for_year(service_id, year):
year_start_date, year_end_date = get_financial_year(year)
utcnow = datetime.utcnow()
today = convert_utc_to_bst(utcnow)
# if year end date is less than today, we are calculating for data in the past and have no need for deltas.
if year_end_date >= today:
yesterday = today - timedelta(days=1)
for day in [yesterday, today]:
data = fetch_billing_data_for_day(process_day=day, service_id=service_id)
for d in data:
update_fact_billing(data=d, process_day=day)
email_and_letters = db.session.query(
func.date_trunc('month', FactBilling.bst_date).cast(Date).label("month"),
func.sum(FactBilling.notifications_sent).label("notifications_sent"),
func.sum(FactBilling.notifications_sent).label("billable_units"),
FactBilling.rate.label('rate'),
FactBilling.notification_type.label('notification_type')
).filter(
FactBilling.service_id == service_id,
FactBilling.bst_date >= year_start_date,
FactBilling.bst_date <= year_end_date,
FactBilling.notification_type.in_([EMAIL_TYPE, LETTER_TYPE])
).group_by(
'month',
FactBilling.rate,
FactBilling.notification_type
)
sms = db.session.query(
func.date_trunc('month', FactBilling.bst_date).cast(Date).label("month"),
func.sum(FactBilling.notifications_sent).label("notifications_sent"),
func.sum(FactBilling.billable_units * FactBilling.rate_multiplier).label("billable_units"),
FactBilling.rate,
FactBilling.notification_type
).filter(
FactBilling.service_id == service_id,
FactBilling.bst_date >= year_start_date,
FactBilling.bst_date <= year_end_date,
FactBilling.notification_type == SMS_TYPE
).group_by(
'month',
FactBilling.rate,
FactBilling.notification_type
)
yearly_data = email_and_letters.union_all(sms).order_by(
'month',
'notification_type',
'rate'
).all()
return yearly_data
def fetch_billing_data_for_day(process_day, service_id=None):
start_date = convert_bst_to_utc(datetime.combine(process_day, time.min))
end_date = convert_bst_to_utc(datetime.combine(process_day + timedelta(days=1), time.min))
# use notification_history if process day is older than 7 days
# this is useful if we need to rebuild the ft_billing table for a date older than 7 days ago.
current_app.logger.info("Populate ft_billing for {} to {}".format(start_date, end_date))
table = Notification
if start_date < datetime.utcnow() - timedelta(days=7):
table = NotificationHistory
transit_data = db.session.query(
table.template_id,
table.service_id,
table.notification_type,
func.coalesce(table.sent_by,
case(
[
(table.notification_type == 'letter', 'dvla'),
(table.notification_type == 'sms', 'unknown'),
(table.notification_type == 'email', 'ses')
]),
).label('sent_by'),
func.coalesce(table.rate_multiplier, 1).cast(Integer).label('rate_multiplier'),
func.coalesce(table.international, False).label('international'),
func.sum(table.billable_units).label('billable_units'),
func.count().label('notifications_sent'),
Service.crown,
).filter(
table.status.in_(NOTIFICATION_STATUS_TYPES_BILLABLE),
table.key_type != KEY_TYPE_TEST,
table.created_at >= start_date,
table.created_at < end_date
).group_by(
table.template_id,
table.service_id,
table.notification_type,
'sent_by',
table.rate_multiplier,
table.international,
Service.crown
).join(
Service
)
if service_id:
transit_data = transit_data.filter(table.service_id == service_id)
return transit_data.all()
def get_rates_for_billing():
non_letter_rates = [(r.notification_type, r.valid_from, r.rate) for r in
Rate.query.order_by(desc(Rate.valid_from)).all()]
letter_rates = [(r.start_date, r.crown, r.sheet_count, r.rate) for r in
LetterRate.query.order_by(desc(LetterRate.start_date)).all()]
return non_letter_rates, letter_rates
def get_rate(non_letter_rates, letter_rates, notification_type, date, crown=None, rate_multiplier=None):
if notification_type == LETTER_TYPE:
return next(r[3] for r in letter_rates if date > r[0] and crown == r[1] and rate_multiplier == r[2])
elif notification_type == SMS_TYPE:
return next(r[2] for r in non_letter_rates if notification_type == r[0] and date > r[1])
else:
return 0
def update_fact_billing(data, process_day):
non_letter_rates, letter_rates = get_rates_for_billing()
rate = get_rate(non_letter_rates,
letter_rates,
data.notification_type,
process_day,
data.crown,
data.rate_multiplier)
billing_record = create_billing_record(data, rate, process_day)
table = FactBilling.__table__
'''
This uses the Postgres upsert to avoid race conditions when two threads try to insert
at the same row. The excluded object refers to values that we tried to insert but were
rejected.
http://docs.sqlalchemy.org/en/latest/dialects/postgresql.html#insert-on-conflict-upsert
'''
stmt = insert(table).values(
bst_date=billing_record.bst_date,
template_id=billing_record.template_id,
service_id=billing_record.service_id,
provider=billing_record.provider,
rate_multiplier=billing_record.rate_multiplier,
notification_type=billing_record.notification_type,
international=billing_record.international,
billable_units=billing_record.billable_units,
notifications_sent=billing_record.notifications_sent,
rate=billing_record.rate
)
stmt = stmt.on_conflict_do_update(
index_elements=[table.c.bst_date,
table.c.template_id,
table.c.service_id,
table.c.provider,
table.c.rate_multiplier,
table.c.notification_type,
table.c.international],
set_={"notifications_sent": stmt.excluded.notifications_sent,
"billable_units": stmt.excluded.billable_units
}
)
db.session.connection().execute(stmt)
db.session.commit()
def create_billing_record(data, rate, process_day):
billing_record = FactBilling(
bst_date=process_day,
template_id=data.template_id,
service_id=data.service_id,
notification_type=data.notification_type,
provider=data.sent_by,
rate_multiplier=data.rate_multiplier,
international=data.international,
billable_units=data.billable_units,
notifications_sent=data.notifications_sent,
rate=rate
)
return billing_record
Make sure the date does not include time
from datetime import datetime, timedelta, time
from flask import current_app
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy import func, case, desc, Date, Integer
from app import db
from app.dao.date_util import get_financial_year
from app.models import (
FactBilling,
Notification,
Service,
KEY_TYPE_TEST,
LETTER_TYPE,
SMS_TYPE,
Rate,
LetterRate,
NOTIFICATION_STATUS_TYPES_BILLABLE,
NotificationHistory,
EMAIL_TYPE
)
from app.utils import convert_utc_to_bst, convert_bst_to_utc
def fetch_billing_totals_for_year(service_id, year):
year_start_date, year_end_date = get_financial_year(year)
"""
Billing for email: only record the total number of emails.
Billing for letters: The billing units is used to fetch the correct rate for the sheet count of the letter.
Total cost is notifications_sent * rate.
Rate multiplier does not apply to email or letters.
"""
email_and_letters = db.session.query(
func.sum(FactBilling.notifications_sent).label("notifications_sent"),
func.sum(FactBilling.notifications_sent).label("billable_units"),
FactBilling.rate.label('rate'),
FactBilling.notification_type.label('notification_type')
).filter(
FactBilling.service_id == service_id,
FactBilling.bst_date >= year_start_date,
FactBilling.bst_date <= year_end_date,
FactBilling.notification_type.in_([EMAIL_TYPE, LETTER_TYPE])
).group_by(
FactBilling.rate,
FactBilling.notification_type
)
"""
Billing for SMS using the billing_units * rate_multiplier. Billing unit of SMS is the fragment count of a message
"""
sms = db.session.query(
func.sum(FactBilling.notifications_sent).label("notifications_sent"),
func.sum(FactBilling.billable_units * FactBilling.rate_multiplier).label("billable_units"),
FactBilling.rate,
FactBilling.notification_type
).filter(
FactBilling.service_id == service_id,
FactBilling.bst_date >= year_start_date,
FactBilling.bst_date <= year_end_date,
FactBilling.notification_type == SMS_TYPE
).group_by(
FactBilling.rate,
FactBilling.notification_type
)
yearly_data = email_and_letters.union_all(sms).order_by(
'notification_type',
'rate'
).all()
return yearly_data
def fetch_monthly_billing_for_year(service_id, year):
year_start_date, year_end_date = get_financial_year(year)
utcnow = datetime.utcnow()
today = convert_utc_to_bst(utcnow)
# if year end date is less than today, we are calculating for data in the past and have no need for deltas.
if year_end_date >= today:
yesterday = today - timedelta(days=1)
for day in [yesterday, today]:
data = fetch_billing_data_for_day(process_day=day, service_id=service_id)
for d in data:
update_fact_billing(data=d, process_day=day)
email_and_letters = db.session.query(
func.date_trunc('month', FactBilling.bst_date).cast(Date).label("month"),
func.sum(FactBilling.notifications_sent).label("notifications_sent"),
func.sum(FactBilling.notifications_sent).label("billable_units"),
FactBilling.rate.label('rate'),
FactBilling.notification_type.label('notification_type')
).filter(
FactBilling.service_id == service_id,
FactBilling.bst_date >= year_start_date,
FactBilling.bst_date <= year_end_date,
FactBilling.notification_type.in_([EMAIL_TYPE, LETTER_TYPE])
).group_by(
'month',
FactBilling.rate,
FactBilling.notification_type
)
sms = db.session.query(
func.date_trunc('month', FactBilling.bst_date).cast(Date).label("month"),
func.sum(FactBilling.notifications_sent).label("notifications_sent"),
func.sum(FactBilling.billable_units * FactBilling.rate_multiplier).label("billable_units"),
FactBilling.rate,
FactBilling.notification_type
).filter(
FactBilling.service_id == service_id,
FactBilling.bst_date >= year_start_date,
FactBilling.bst_date <= year_end_date,
FactBilling.notification_type == SMS_TYPE
).group_by(
'month',
FactBilling.rate,
FactBilling.notification_type
)
yearly_data = email_and_letters.union_all(sms).order_by(
'month',
'notification_type',
'rate'
).all()
return yearly_data
def fetch_billing_data_for_day(process_day, service_id=None):
start_date = convert_bst_to_utc(datetime.combine(process_day, time.min))
end_date = convert_bst_to_utc(datetime.combine(process_day + timedelta(days=1), time.min))
# use notification_history if process day is older than 7 days
# this is useful if we need to rebuild the ft_billing table for a date older than 7 days ago.
current_app.logger.info("Populate ft_billing for {} to {}".format(start_date, end_date))
table = Notification
if start_date < datetime.utcnow() - timedelta(days=7):
table = NotificationHistory
transit_data = db.session.query(
table.template_id,
table.service_id,
table.notification_type,
func.coalesce(table.sent_by,
case(
[
(table.notification_type == 'letter', 'dvla'),
(table.notification_type == 'sms', 'unknown'),
(table.notification_type == 'email', 'ses')
]),
).label('sent_by'),
func.coalesce(table.rate_multiplier, 1).cast(Integer).label('rate_multiplier'),
func.coalesce(table.international, False).label('international'),
func.sum(table.billable_units).label('billable_units'),
func.count().label('notifications_sent'),
Service.crown,
).filter(
table.status.in_(NOTIFICATION_STATUS_TYPES_BILLABLE),
table.key_type != KEY_TYPE_TEST,
table.created_at >= start_date,
table.created_at < end_date
).group_by(
table.template_id,
table.service_id,
table.notification_type,
'sent_by',
table.rate_multiplier,
table.international,
Service.crown
).join(
Service
)
if service_id:
transit_data = transit_data.filter(table.service_id == service_id)
return transit_data.all()
def get_rates_for_billing():
non_letter_rates = [(r.notification_type, r.valid_from, r.rate) for r in
Rate.query.order_by(desc(Rate.valid_from)).all()]
letter_rates = [(r.start_date, r.crown, r.sheet_count, r.rate) for r in
LetterRate.query.order_by(desc(LetterRate.start_date)).all()]
return non_letter_rates, letter_rates
def get_rate(non_letter_rates, letter_rates, notification_type, date, crown=None, rate_multiplier=None):
if notification_type == LETTER_TYPE:
return next(r[3] for r in letter_rates if date > r[0] and crown == r[1] and rate_multiplier == r[2])
elif notification_type == SMS_TYPE:
return next(r[2] for r in non_letter_rates if notification_type == r[0] and date > r[1])
else:
return 0
def update_fact_billing(data, process_day):
non_letter_rates, letter_rates = get_rates_for_billing()
rate = get_rate(non_letter_rates,
letter_rates,
data.notification_type,
process_day,
data.crown,
data.rate_multiplier)
billing_record = create_billing_record(data, rate, process_day)
table = FactBilling.__table__
'''
This uses the Postgres upsert to avoid race conditions when two threads try to insert
at the same row. The excluded object refers to values that we tried to insert but were
rejected.
http://docs.sqlalchemy.org/en/latest/dialects/postgresql.html#insert-on-conflict-upsert
'''
stmt = insert(table).values(
bst_date=billing_record.bst_date,
template_id=billing_record.template_id,
service_id=billing_record.service_id,
provider=billing_record.provider,
rate_multiplier=billing_record.rate_multiplier,
notification_type=billing_record.notification_type,
international=billing_record.international,
billable_units=billing_record.billable_units,
notifications_sent=billing_record.notifications_sent,
rate=billing_record.rate
)
stmt = stmt.on_conflict_do_update(
index_elements=[table.c.bst_date,
table.c.template_id,
table.c.service_id,
table.c.provider,
table.c.rate_multiplier,
table.c.notification_type,
table.c.international],
set_={"notifications_sent": stmt.excluded.notifications_sent,
"billable_units": stmt.excluded.billable_units
}
)
db.session.connection().execute(stmt)
db.session.commit()
def create_billing_record(data, rate, process_day):
billing_record = FactBilling(
bst_date=process_day.date(),
template_id=data.template_id,
service_id=data.service_id,
notification_type=data.notification_type,
provider=data.sent_by,
rate_multiplier=data.rate_multiplier,
international=data.international,
billable_units=data.billable_units,
notifications_sent=data.notifications_sent,
rate=rate
)
return billing_record
|
"""
@package mi.instrument.sunburst.sami2_pco2.pco2b.test.test_driver
@file marine-integrations/mi/instrument/sunburst/sami2_pco2/pco2b/driver.py
@author Kevin Stiemke
@brief Test cases for pco2b driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Kevin Stiemke'
__license__ = 'Apache 2.0'
import unittest
import time
import copy
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger
log = get_logger()
# MI imports.
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import DriverStartupConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from pyon.agent.agent import ResourceAgentEvent
from pyon.agent.agent import ResourceAgentState
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import InstrumentDriver
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import InstrumentCommand
from mi.instrument.sunburst.sami2_pco2.driver import ScheduledJob
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import ProtocolState
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import ProtocolEvent
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Capability
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Parameter
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Protocol
from mi.instrument.sunburst.driver import Prompt
from mi.instrument.sunburst.driver import SAMI_NEWLINE
from mi.instrument.sunburst.sami2_pco2.driver import Pco2wSamiSampleDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Pco2wbDev1SampleDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Pco2wConfigurationDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import DataParticleType
# Added Imports (Note, these pick up some of the base classes not directly imported above)
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverTestMixinSub
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverUnitTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverIntegrationTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverQualificationTest
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.sunburst.sami2_pco2.pco2b.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id='V7HE4T',
instrument_agent_name='sunburst_sami2_pco2_pco2b',
instrument_agent_packet_config=DataParticleType(),
# driver_startup_config={}
driver_startup_config={
DriverStartupConfigKey.PARAMETERS: {
Parameter.EXTERNAL_PUMP_DELAY: 10,
Parameter.BIT_SWITCHES: 0x01
},
}
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class DriverTestMixinSub(Pco2DriverTestMixinSub):
"""
Mixin class used for storing data particle constants and common data
assertion methods.
"""
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
_driver_capabilities = {
# capabilities defined in the IOS
Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.ACQUIRE_BLANK_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.AUTOSAMPLE,
ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]},
Capability.RUN_EXTERNAL_PUMP: {STATES: [ProtocolState.COMMAND]}
}
###
# Instrument output (driver input) Definitions
###
# Configuration string received from the instrument via the L command
# (clock set to 2014-01-01 00:00:00) with sampling set to start 540 days
# (~18 months) later and stop 365 days after that. SAMI and Device1
# (external SBE pump) are set to run every 60 minutes, but will be polled
# on a regular schedule rather than autosampled. Device1 is not configured
# to run after the SAMI and will run for 10 seconds. To configure the
# instrument using this string, add a null byte (00) to the end of the
# string.
VALID_CONFIG_STRING = 'CEE90B0002C7EA0001E133800A000E100402000E10010B' + \
'000000000D000000000D000000000D07' + \
'1020FF54181C0100381E' + \
'000000000000000000000000000000000000000000000000000' + \
'000000000000000000000000000000000000000000000000000' + \
'0000000000000000000000000000' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + SAMI_NEWLINE
# Data records -- SAMI and Device1 (external pump) (responses to R0 and R1
# commands, respectively)
VALID_R0_BLANK_SAMPLE = '*542705CEE91CC800400019096206800730074C2CE042' + \
'74003B0018096106800732074E0D82066124' + SAMI_NEWLINE
VALID_R0_DATA_SAMPLE = '*542704CEE91CC8003B001909620155073003E908A1232' + \
'D0043001A09620154072F03EA0D92065F3B' + SAMI_NEWLINE
VALID_R1_SAMPLE = '*540711CEE91DE2CE' + SAMI_NEWLINE
###
# Parameter and Type Definitions
###
_driver_parameters = {
# Parameters defined in the IOS
Parameter.LAUNCH_TIME: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00000000, VALUE: 0xCEE90B00},
Parameter.START_TIME_FROM_LAUNCH: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02C7EA00, VALUE: 0x02C7EA00},
Parameter.STOP_TIME_FROM_START: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x01E13380, VALUE: 0x01E13380},
Parameter.MODE_BITS: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0A, VALUE: 0x0A},
Parameter.SAMI_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000E10, VALUE: 0x000E10},
Parameter.SAMI_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x04, VALUE: 0x04},
Parameter.SAMI_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02, VALUE: 0x02},
Parameter.DEVICE1_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000E10, VALUE: 0x000E10},
Parameter.DEVICE1_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01},
Parameter.DEVICE1_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0B, VALUE: 0x0B},
Parameter.DEVICE2_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.DEVICE2_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.DEVICE2_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D},
Parameter.DEVICE3_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.DEVICE3_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.DEVICE3_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D},
Parameter.PRESTART_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.PRESTART_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.PRESTART_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D},
Parameter.GLOBAL_CONFIGURATION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x07, VALUE: 0x07},
Parameter.PUMP_PULSE: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x10, VALUE: 0x10},
Parameter.PUMP_DURATION: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x20, VALUE: 0x20},
Parameter.SAMPLES_PER_MEASUREMENT: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0xFF, VALUE: 0xFF},
Parameter.CYCLES_BETWEEN_BLANKS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x54, VALUE: 0x54},
Parameter.NUMBER_REAGENT_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x18, VALUE: 0x18},
Parameter.NUMBER_BLANK_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x1C, VALUE: 0x1C},
Parameter.FLUSH_PUMP_INTERVAL: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01},
Parameter.BIT_SWITCHES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x38, VALUE: 0x38},
Parameter.EXTERNAL_PUMP_SETTINGS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x1E, VALUE: 0x1E},
Parameter.AUTO_SAMPLE_INTERVAL: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 3600, VALUE: 3600},
Parameter.EXTERNAL_PUMP_DELAY: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 360, VALUE: 360},
Parameter.REAGENT_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.DEIONIZED_WATER_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.PUMP_100ML_CYCLES: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
}
_sami_data_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 4)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x003B, 0x0019, 0x0962, 0x0155,
0x0730, 0x03E9, 0x08A1, 0x232D,
0x0043, 0x001A, 0x0962, 0x0154,
0x072F, 0x03EA], REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D92, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x065F, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x3B, REQUIRED: True}
}
_sami_blank_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 5)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x05, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x0040, 0x0019, 0x0962, 0x0680, 0x0730,
0x074C, 0x2CE0, 0x4274, 0x003B, 0x0018,
0x0961, 0x0680, 0x0732, 0x074E],
REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D82, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x0661, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x24, REQUIRED: True}
}
_dev1_sample_parameters = {
# Device 1 (external pump) Type 17 sample
Pco2wbDev1SampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x07, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x11, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91DE2, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0xCE, REQUIRED: True}
}
_configuration_parameters = {
# Configuration settings
Pco2wConfigurationDataParticleKey.LAUNCH_TIME: {TYPE: int, VALUE: 0xCEE90B00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.START_TIME_OFFSET: {TYPE: int, VALUE: 0x02C7EA00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.RECORDING_TIME: {TYPE: int, VALUE: 0x01E13380, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SAMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT1_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT1_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT2_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT2_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT3_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT3_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_SAMI: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_SAMI: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_SAMI: {TYPE: int, VALUE: 0x02, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE1: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_DEVICE1: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE1: {TYPE: int, VALUE: 0x0B, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE2: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_DEVICE2: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE2: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE3: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_DEVICE3: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE3: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_PRESTART: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_PRESTART: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_PRESTART: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2wConfigurationDataParticleKey.USE_BAUD_RATE_57600: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SEND_RECORD_TYPE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SEND_LIVE_RECORDS: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.EXTEND_GLOBAL_CONFIG: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PUMP_PULSE: {TYPE: int, VALUE: 0x10, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PUMP_DURATION: {TYPE: int, VALUE: 0x20, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SAMPLES_PER_MEASUREMENT: {TYPE: int, VALUE: 0xFF, REQUIRED: True},
Pco2wConfigurationDataParticleKey.CYCLES_BETWEEN_BLANKS: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wConfigurationDataParticleKey.NUMBER_REAGENT_CYCLES: {TYPE: int, VALUE: 0x18, REQUIRED: True},
Pco2wConfigurationDataParticleKey.NUMBER_BLANK_CYCLES: {TYPE: int, VALUE: 0x1C, REQUIRED: True},
Pco2wConfigurationDataParticleKey.FLUSH_PUMP_INTERVAL: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DISABLE_START_BLANK_FLUSH: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.MEASURE_AFTER_PUMP_PULSE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, VALUE: 0x38, REQUIRED: True},
Pco2wConfigurationDataParticleKey.EXTERNAL_PUMP_SETTINGS: {TYPE: int, VALUE: 0x1E, REQUIRED: True}
}
###
# Driver Parameter Methods
###
def assert_driver_parameters(self, current_parameters, verify_values=False):
"""
Verify that all driver parameters are correct and potentially verify
values.
@param current_parameters: driver parameters read from the driver
instance
@param verify_values: should we verify values against definition?
"""
self.assert_parameters(current_parameters, self._driver_parameters,
verify_values)
def assert_particle_sami_data_sample(self, data_particle, verify_values=False):
"""
Verify sami_data_sample particle (Type 4)
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 4, msg="Not a regular sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_data_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.SAMI_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._sami_data_sample_parameters,
verify_values)
def assert_particle_sami_blank_sample(self, data_particle, verify_values=False):
"""
Verify sami_blank_sample particle (Type 5)
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 5, msg="Not a blank sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_blank_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.SAMI_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._sami_blank_sample_parameters,
verify_values)
def assert_particle_dev1_sample(self, data_particle, verify_values=False):
"""
Verify dev1_sample particle
@param data_particle: Pco2wDev1SampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 17, msg="Not a device 1 sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wbDev1SampleDataParticleKey,
self._dev1_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.DEV1_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._dev1_sample_parameters,
verify_values)
def assert_particle_configuration(self, data_particle, verify_values=False):
"""
Verify configuration particle
@param data_particle: Pco2wConfigurationDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(Pco2wConfigurationDataParticleKey,
self._configuration_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.CONFIGURATION)
self.assert_data_particle_parameters(data_particle,
self._configuration_parameters,
verify_values)
###############################################################################
# UNIT TESTS #
# Unit Tests: test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(Pco2DriverUnitTest, DriverTestMixinSub):
capabilities_test_dict = {
ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.WAITING: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
'DRIVER_EVENT_SET',
'DRIVER_EVENT_START_DIRECT',
'DRIVER_EVENT_ACQUIRE_STATUS',
'DRIVER_EVENT_ACQUIRE_SAMPLE',
'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
'DRIVER_EVENT_START_AUTOSAMPLE',
'DRIVER_EVENT_DEIONIZED_WATER_FLUSH',
'DRIVER_EVENT_REAGENT_FLUSH',
'DRIVER_EVENT_DEIONIZED_WATER_FLUSH_100ML',
'DRIVER_EVENT_REAGENT_FLUSH_100ML',
'DRIVER_EVENT_RUN_EXTERNAL_PUMP'],
ProtocolState.DEIONIZED_WATER_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.REAGENT_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.DEIONIZED_WATER_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.REAGENT_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.RUN_EXTERNAL_PUMP: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_ACQUIRE_SAMPLE',
'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
'DRIVER_EVENT_STOP_AUTOSAMPLE',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.DIRECT_ACCESS: ['EXECUTE_DIRECT',
'DRIVER_EVENT_STOP_DIRECT'],
ProtocolState.POLLED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.POLLED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.SCHEDULED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.SCHEDULED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
}
def test_base_driver_enums(self):
"""
Verify that all the SAMI Instrument driver enumerations have no
duplicate values that might cause confusion. Also do a little
extra validation for the Capabilites
Extra enumeration tests are done in a specific subclass
"""
# Test Enums defined in the base SAMI driver
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
# Test capabilites for duplicates, then verify that capabilities
# is a subset of proto events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
def test_driver_enums(self):
"""
Verify that all driver enumeration has no duplicate values that might
cause confusion.
"""
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_has_no_duplicates(InstrumentCommand())
def test_chunker(self):
"""
Test the chunker and verify the particles created.
"""
chunker = StringChunker(Protocol.sieve_function)
for part in [self.VALID_STATUS_MESSAGE, self.VALID_CONTROL_RECORD, self.VALID_R0_BLANK_SAMPLE,
self.VALID_R0_DATA_SAMPLE, self.VALID_R1_SAMPLE, self.VALID_CONFIG_STRING]:
self.assert_chunker_sample(chunker, part)
self.assert_chunker_sample_with_noise(chunker, part)
self.assert_chunker_fragmented_sample(chunker, part)
self.assert_chunker_combined_sample(chunker, part)
def test_got_data(self):
"""
Verify sample data passed through the got data method produces the
correct data particles
"""
# Create and initialize the instrument driver with a mock port agent
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver)
self.assert_raw_particle_published(driver, True)
# Start validating data particles
self.assert_particle_published(driver, self.VALID_STATUS_MESSAGE,
self.assert_particle_regular_status, True)
self.assert_particle_published(driver, self.VALID_CONTROL_RECORD,
self.assert_particle_control_record, True)
self.assert_particle_published(driver, self.VALID_R0_BLANK_SAMPLE,
self.assert_particle_sami_blank_sample, True)
self.assert_particle_published(driver, self.VALID_R0_DATA_SAMPLE,
self.assert_particle_sami_data_sample, True)
self.assert_particle_published(driver, self.VALID_R1_SAMPLE,
self.assert_particle_dev1_sample, True)
self.assert_particle_published(driver, self.VALID_CONFIG_STRING,
self.assert_particle_configuration, True)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities. Iterate through available
capabilities, and verify that they can pass successfully through the
filter. Test silly made up capabilities to verify they are blocked by
filter.
"""
mock_callback = Mock()
protocol = Protocol(Prompt, SAMI_NEWLINE, mock_callback)
driver_capabilities = Capability().list()
test_capabilities = Capability().list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in
this dict must also be defined in the protocol FSM. Note, the EXIT and
ENTER DRIVER_EVENTS don't need to be listed here.
"""
# capabilities defined in base class test_driver.
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, self.capabilities_test_dict)
def test_pump_commands(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_pump_commands(driver)
def test_pump_timing(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_pump_timing(driver)
def test_waiting_discover(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_waiting_discover(driver)
def test_autosample_timing(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_autosample_timing(driver)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(Pco2DriverIntegrationTest, DriverTestMixinSub):
"""
Integration Tests:
test_startup_params: Verify that driver startup parameters are set properly.
test_set: In command state, test configuration particle generation.
Parameter.PUMP_PULSE
Parameter.PUMP_DURATION
Parameter.SAMPLES_PER_MEASUREMENT
Parameter.CYCLES_BETWEEN_BLANKS
Parameter.NUMBER_REAGENT_CYCLES
Parameter.NUMBER_BLANK_CYCLES
Parameter.FLUSH_PUMP_INTERVAL
Parameter.BIT_SWITCHES
Parameter.NUMBER_EXTRA_PUMP_CYCLES
Parameter.AUTO_SAMPLE_INTERVAL
Negative Set Tests:
START_TIME_FROM_LAUNCH
STOP_TIME_FROM_START
MODE_BITS
SAMI_SAMPLE_INTERVAL
test_commands: In autosample and command states, test particle generation.
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
ACQUIRE_BLANK_SAMPLE = ProtocolEvent.ACQUIRE_BLANK_SAMPLE
test_autosample: Test autosample particle generation.
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
test_scheduled_data: In command and autosample states
ACQUIRE_STATUS
"""
def test_startup_params(self):
startup_values = {
Parameter.PUMP_PULSE: 0x10,
Parameter.PUMP_DURATION: 0x20,
Parameter.SAMPLES_PER_MEASUREMENT: 0xFF,
Parameter.CYCLES_BETWEEN_BLANKS: 0x54,
Parameter.NUMBER_REAGENT_CYCLES: 0x18,
Parameter.NUMBER_BLANK_CYCLES: 0x1C,
Parameter.FLUSH_PUMP_INTERVAL: 0x01,
Parameter.BIT_SWITCHES: 0x01,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x38,
Parameter.EXTERNAL_PUMP_SETTINGS: 0x1E,
Parameter.EXTERNAL_PUMP_DELAY: 10,
Parameter.AUTO_SAMPLE_INTERVAL: 3600,
Parameter.REAGENT_FLUSH_DURATION: 0x08,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x08,
Parameter.PUMP_100ML_CYCLES: 1
}
new_values = {
Parameter.PUMP_PULSE: 0x11,
Parameter.PUMP_DURATION: 0x21,
Parameter.SAMPLES_PER_MEASUREMENT: 0xFA,
Parameter.CYCLES_BETWEEN_BLANKS: 0xA9,
Parameter.NUMBER_REAGENT_CYCLES: 0x19,
Parameter.NUMBER_BLANK_CYCLES: 0x1D,
Parameter.FLUSH_PUMP_INTERVAL: 0x02,
Parameter.BIT_SWITCHES: 0x02,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x39,
Parameter.EXTERNAL_PUMP_SETTINGS: 0x40,
Parameter.EXTERNAL_PUMP_DELAY: 300,
Parameter.AUTO_SAMPLE_INTERVAL: 600,
Parameter.REAGENT_FLUSH_DURATION: 0x01,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x0F,
Parameter.PUMP_100ML_CYCLES: 14
}
self.assert_initialize_driver()
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
self.assert_set_bulk(new_values)
reply = self.driver_client.cmd_dvr('apply_startup_params')
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
def test_set(self):
self.assert_initialize_driver()
self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 77)
self.assert_set(Parameter.CYCLES_BETWEEN_BLANKS, 7)
self.assert_set(Parameter.PUMP_PULSE, 20)
self.assert_set(Parameter.SAMPLES_PER_MEASUREMENT, 239)
self.assert_set(Parameter.NUMBER_REAGENT_CYCLES, 26)
self.assert_set(Parameter.NUMBER_BLANK_CYCLES, 30)
self.assert_set(Parameter.FLUSH_PUMP_INTERVAL, 2)
self.assert_set(Parameter.BIT_SWITCHES, 1)
self.assert_set(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88)
self.assert_set(Parameter.EXTERNAL_PUMP_SETTINGS, 40)
self.assert_set(Parameter.EXTERNAL_PUMP_DELAY, 60)
self.assert_set(Parameter.REAGENT_FLUSH_DURATION, 16)
self.assert_set(Parameter.DEIONIZED_WATER_FLUSH_DURATION, 4)
self.assert_set(Parameter.PUMP_100ML_CYCLES, 14)
self.assert_set_readonly(Parameter.START_TIME_FROM_LAUNCH, 84600)
self.assert_set_readonly(Parameter.STOP_TIME_FROM_START, 84600)
self.assert_set_readonly(Parameter.MODE_BITS, 10)
self.assert_set_readonly(Parameter.SAMI_SAMPLE_INTERVAL, 1800)
def test_bulk_set(self):
self.assert_initialize_driver()
new_values = {
Parameter.AUTO_SAMPLE_INTERVAL: 77,
Parameter.CYCLES_BETWEEN_BLANKS: 7,
Parameter.PUMP_PULSE: 20,
Parameter.SAMPLES_PER_MEASUREMENT: 239,
Parameter.NUMBER_REAGENT_CYCLES: 26,
Parameter.NUMBER_BLANK_CYCLES: 30,
Parameter.FLUSH_PUMP_INTERVAL: 2,
Parameter.BIT_SWITCHES: 1,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 88,
Parameter.EXTERNAL_PUMP_SETTINGS: 40,
Parameter.EXTERNAL_PUMP_DELAY: 60,
Parameter.REAGENT_FLUSH_DURATION: 4,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 16,
Parameter.PUMP_100ML_CYCLES: 14
}
self.assert_set_bulk(new_values)
def test_bad_parameters(self):
self.assert_initialize_driver()
self.assert_set_exception(Parameter.CYCLES_BETWEEN_BLANKS, 7.0)
self.assert_set_exception(Parameter.PUMP_PULSE, 20.0)
self.assert_set_exception(Parameter.SAMPLES_PER_MEASUREMENT, 239.0)
self.assert_set_exception(Parameter.NUMBER_REAGENT_CYCLES, 26.0)
self.assert_set_exception(Parameter.NUMBER_BLANK_CYCLES, 30.0)
self.assert_set_exception(Parameter.FLUSH_PUMP_INTERVAL, 2.0)
self.assert_set_exception(Parameter.BIT_SWITCHES, 1.0)
self.assert_set_exception(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88.0)
self.assert_set_exception(Parameter.EXTERNAL_PUMP_SETTINGS, 40.0)
## EXTERNAL_PUMP_DELAY is set to 10 seconds in the startup_config. It defaults to 10 minutes
def test_external_pump_delay(self):
"""
Test delay between running of external pump and taking a sample
"""
max_sample_time = 15 # Maximum observed sample time with current configuration.
global dev1_sample
global data_sample
def get_dev1_sample(particle):
"""
Get dev1 sample
:param particle: dev1 sample particle
"""
global dev1_sample
dev1_sample = particle
def get_data_sample(particle):
"""
Get data sample
:param particle: data sample particle
"""
global data_sample
data_sample = particle
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, get_dev1_sample, timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, get_data_sample, timeout=180)
dev1_dict = self.get_data_particle_values_as_dict(dev1_sample)
sample_dict = self.get_data_particle_values_as_dict(data_sample)
dev1_time = dev1_dict.get(Pco2wbDev1SampleDataParticleKey.RECORD_TIME)
sample_time = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TIME)
time_diff = sample_time - dev1_time
self.assertTrue((time_diff > 10) and (time_diff < (10 + max_sample_time)),
"External pump delay %s is invalid" % time_diff)
self.assert_set(Parameter.EXTERNAL_PUMP_DELAY, 60)
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, get_dev1_sample, timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, get_data_sample, timeout=180)
dev1_dict = self.get_data_particle_values_as_dict(dev1_sample)
sample_dict = self.get_data_particle_values_as_dict(data_sample)
dev1_time = dev1_dict.get(Pco2wbDev1SampleDataParticleKey.RECORD_TIME)
sample_time = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TIME)
time_diff = sample_time - dev1_time
self.assertTrue((time_diff > 60) and (time_diff < (60 + max_sample_time)),
"External pump delay %s is invalid" % time_diff)
def test_acquire_sample(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
def test_acquire_blank_sample(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_blank_sample,
timeout=180)
def test_auto_sample(self):
self.assert_initialize_driver()
self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 80)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=4, timeout=400)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=4)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
self.clear_events()
#Now verify that no more particles get generated
failed = False
try:
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=240)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample)
failed = True
except AssertionError:
pass
self.assertFalse(failed)
#Restart autosample
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=4, timeout=400)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=4)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_polled_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE, state=ProtocolState.POLLED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
def test_polled_blank_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.POLLED_BLANK_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_blank_sample,
timeout=180)
def test_scheduled_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_scheduled_blank_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
self.clear_events()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.SCHEDULED_BLANK_SAMPLE,
delay=5)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_blank_sample,
timeout=180)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_scheduled_device_status_auto_sample(self):
"""
Verify the device status command can be triggered and run in autosample
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, delay=180)
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.CONFIGURATION, self.assert_particle_configuration,
timeout=300)
self.assert_async_particle_generation(DataParticleType.BATTERY_VOLTAGE, self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_queued_command(self):
"""
Verify status is queued while samples are being taken
"""
self.assert_initialize_driver()
## Queue status
self.clear_events()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=1, timeout=220)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=1, timeout=60)
self.assert_async_particle_generation(DataParticleType.REGULAR_STATUS, self.assert_particle_regular_status,
timeout=180)
self.assert_current_state(ProtocolState.COMMAND)
def test_queued_autosample(self):
"""
Verify status is queued while samples are being taken
"""
self.assert_initialize_driver()
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
## Queue status
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=1, timeout=220)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=1, timeout=60)
self.assert_async_particle_generation(DataParticleType.REGULAR_STATUS, self.assert_particle_regular_status,
timeout=180)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_acquire_status(self):
self.assert_initialize_driver()
self.clear_events()
self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.REGULAR_STATUS,
self.assert_particle_regular_status)
self.assert_async_particle_generation(DataParticleType.CONFIGURATION, self.assert_particle_configuration)
self.assert_async_particle_generation(DataParticleType.BATTERY_VOLTAGE, self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
def test_scheduled_device_status_command(self):
"""
Verify the device status command can be triggered and run in command
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, delay=120)
self.clear_events()
self.assert_async_particle_generation(DataParticleType.CONFIGURATION, self.assert_particle_configuration,
timeout=180)
self.assert_async_particle_generation(DataParticleType.BATTERY_VOLTAGE, self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
self.assert_current_state(ProtocolState.COMMAND)
def test_run_external_pump(self):
"""
Test running external pump and queueing status
"""
self.assert_initialize_driver()
self.clear_events()
self.assert_driver_command(ProtocolEvent.RUN_EXTERNAL_PUMP)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=20.0)
self.assert_async_particle_generation(DataParticleType.REGULAR_STATUS, self.assert_particle_regular_status,
timeout=20.0)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. The generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(Pco2DriverQualificationTest, DriverTestMixinSub):
@unittest.skip("Runs for several hours to test default autosample rate of 60 minutes")
def test_overnight(self):
"""
Verify autosample at default rate
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.BIT_SWITCHES, 0x00)
self.assert_set_parameter(Parameter.EXTERNAL_PUMP_DELAY, 360)
request_sample = time.time()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.DEV1_SAMPLE, sample_count=1, timeout=120)
receive_dev1_sample = time.time()
dev1_sample_time = receive_dev1_sample - request_sample
self.assert_sample_async(self.assert_particle_sami_blank_sample, DataParticleType.SAMI_SAMPLE, timeout=800)
receive_sample = time.time()
sample_time = receive_sample - request_sample
log.debug("dev1_sample_time = %s", dev1_sample_time)
log.debug("sample_time = %s", sample_time)
self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE,
timeout=14400)
def test_direct_access_telnet_mode(self):
"""
@brief This test manually tests that the Instrument Driver properly
supports direct access to the physical instrument. (telnet mode)
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)
configuration_string = 'CF87945A02C7EA0001E133800A000E100402000E10010B0000000000000000000000000000000' + \
'71020FFA8181C0100383C00000000000000000000000000000000000000000000000000000000' + \
'00000000000000000000000000000000000000000000000000000000000000000000000000000' + \
'0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
self.assert_direct_access_start_telnet()
self.assertTrue(self.tcp_client)
# Erase memory
self.tcp_client.send_data("E5A%s" % SAMI_NEWLINE)
time.sleep(1)
# Load a new configuration string changing X to X
self.tcp_client.send_data("L5A%s" % SAMI_NEWLINE)
time.sleep(1)
self.tcp_client.send_data("%s00%s" % (configuration_string, SAMI_NEWLINE))
time.sleep(1)
# Check that configuration was changed
self.tcp_client.send_data("L%s" % SAMI_NEWLINE)
return_value = self.tcp_client.expect(configuration_string)
self.assertTrue(return_value)
###
# Add instrument specific code here.
###
self.assert_direct_access_stop_telnet()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_get_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)
def test_command_poll(self):
self.assert_enter_command_mode()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_blank_sample, DataParticleType.SAMI_SAMPLE, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.REGULAR_STATUS, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
DataParticleType.CONFIGURATION, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
DataParticleType.BATTERY_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
DataParticleType.THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.RUN_EXTERNAL_PUMP, self.assert_particle_dev1_sample,
DataParticleType.DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH, delay=15, agent_state=ResourceAgentState.COMMAND,
resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH_100ML, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
def test_autosample_poll(self):
self.assert_enter_command_mode()
self.assert_start_autosample(timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_blank_sample, DataParticleType.SAMI_SAMPLE, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.REGULAR_STATUS, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
DataParticleType.CONFIGURATION, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
DataParticleType.BATTERY_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
DataParticleType.THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
self.assert_stop_autosample()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
def test_autosample(self):
"""
Verify autosample works and data particles are created
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.AUTO_SAMPLE_INTERVAL, 80)
self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE)
def test_get_capabilities(self):
"""
@brief Verify that the correct capabilities are returned from get_capabilities
at various driver/agent states.
"""
self.assert_enter_command_mode()
##################
# Command Mode
##################
capabilities = {
AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
AgentCapabilityType.RESOURCE_COMMAND: [
ProtocolEvent.START_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.ACQUIRE_BLANK_SAMPLE,
ProtocolEvent.DEIONIZED_WATER_FLUSH,
ProtocolEvent.REAGENT_FLUSH,
ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML,
ProtocolEvent.REAGENT_FLUSH_100ML,
ProtocolEvent.RUN_EXTERNAL_PUMP
],
AgentCapabilityType.RESOURCE_INTERFACE: None,
AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
}
self.assert_capabilities(capabilities)
##################
# DA Mode
##################
da_capabilities = copy.deepcopy(capabilities)
da_capabilities[AgentCapabilityType.AGENT_COMMAND] = [ResourceAgentEvent.GO_COMMAND]
da_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
# Test direct access disconnect
self.assert_direct_access_start_telnet(timeout=10)
self.assertTrue(self.tcp_client)
self.assert_capabilities(da_capabilities)
self.tcp_client.disconnect()
# Now do it again, but use the event to stop DA
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_direct_access_start_telnet(timeout=10)
self.assert_capabilities(da_capabilities)
self.assert_direct_access_stop_telnet()
##################
# Command Mode
##################
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
##################
# Streaming Mode
##################
st_capabilities = copy.deepcopy(capabilities)
st_capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
st_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
ProtocolEvent.STOP_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.ACQUIRE_BLANK_SAMPLE
]
self.assert_start_autosample(timeout=200)
self.assert_capabilities(st_capabilities)
self.assert_stop_autosample()
##################
# Command Mode
##################
# We should be back in command mode from DA.
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
#######################
# Uninitialized Mode
#######################
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
self.assert_reset()
self.assert_capabilities(capabilities)
forgot file
"""
@package mi.instrument.sunburst.sami2_pco2.pco2b.test.test_driver
@file marine-integrations/mi/instrument/sunburst/sami2_pco2/pco2b/driver.py
@author Kevin Stiemke
@brief Test cases for pco2b driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Kevin Stiemke'
__license__ = 'Apache 2.0'
import unittest
import time
import copy
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger
log = get_logger()
# MI imports.
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import DriverStartupConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from pyon.agent.agent import ResourceAgentEvent
from pyon.agent.agent import ResourceAgentState
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import InstrumentDriver
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import InstrumentCommand
from mi.instrument.sunburst.sami2_pco2.driver import ScheduledJob
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import ProtocolState
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import ProtocolEvent
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Capability
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Parameter
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Protocol
from mi.instrument.sunburst.driver import Prompt
from mi.instrument.sunburst.driver import SAMI_NEWLINE
from mi.instrument.sunburst.sami2_pco2.driver import Pco2wSamiSampleDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Pco2wbDev1SampleDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Pco2wConfigurationDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import DataParticleType
# Added Imports (Note, these pick up some of the base classes not directly imported above)
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverTestMixinSub
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverUnitTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverIntegrationTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverQualificationTest
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.sunburst.sami2_pco2.pco2b.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id='V7HE4T',
instrument_agent_name='sunburst_sami2_pco2_pco2b',
instrument_agent_packet_config=DataParticleType(),
# driver_startup_config={}
driver_startup_config={
DriverStartupConfigKey.PARAMETERS: {
Parameter.EXTERNAL_PUMP_DELAY: 10,
Parameter.BIT_SWITCHES: 0x01
},
}
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class DriverTestMixinSub(Pco2DriverTestMixinSub):
"""
Mixin class used for storing data particle constants and common data
assertion methods.
"""
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
_driver_capabilities = {
# capabilities defined in the IOS
Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.ACQUIRE_BLANK_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.AUTOSAMPLE,
ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]},
Capability.RUN_EXTERNAL_PUMP: {STATES: [ProtocolState.COMMAND]}
}
###
# Instrument output (driver input) Definitions
###
# Configuration string received from the instrument via the L command
# (clock set to 2014-01-01 00:00:00) with sampling set to start 540 days
# (~18 months) later and stop 365 days after that. SAMI and Device1
# (external SBE pump) are set to run every 60 minutes, but will be polled
# on a regular schedule rather than autosampled. Device1 is not configured
# to run after the SAMI and will run for 10 seconds. To configure the
# instrument using this string, add a null byte (00) to the end of the
# string.
VALID_CONFIG_STRING = 'CEE90B0002C7EA0001E133800A000E100402000E10010B' + \
'000000000D000000000D000000000D07' + \
'1020FF54181C0100381E' + \
'000000000000000000000000000000000000000000000000000' + \
'000000000000000000000000000000000000000000000000000' + \
'0000000000000000000000000000' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + SAMI_NEWLINE
# Data records -- SAMI and Device1 (external pump) (responses to R0 and R1
# commands, respectively)
VALID_R0_BLANK_SAMPLE = '*542705CEE91CC800400019096206800730074C2CE042' + \
'74003B0018096106800732074E0D82066124' + SAMI_NEWLINE
VALID_R0_DATA_SAMPLE = '*542704CEE91CC8003B001909620155073003E908A1232' + \
'D0043001A09620154072F03EA0D92065F3B' + SAMI_NEWLINE
VALID_R1_SAMPLE = '*540711CEE91DE2CE' + SAMI_NEWLINE
###
# Parameter and Type Definitions
###
_driver_parameters = {
# Parameters defined in the IOS
Parameter.LAUNCH_TIME: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00000000, VALUE: 0xCEE90B00},
Parameter.START_TIME_FROM_LAUNCH: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02C7EA00, VALUE: 0x02C7EA00},
Parameter.STOP_TIME_FROM_START: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x01E13380, VALUE: 0x01E13380},
Parameter.MODE_BITS: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0A, VALUE: 0x0A},
Parameter.SAMI_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000E10, VALUE: 0x000E10},
Parameter.SAMI_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x04, VALUE: 0x04},
Parameter.SAMI_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02, VALUE: 0x02},
Parameter.DEVICE1_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000E10, VALUE: 0x000E10},
Parameter.DEVICE1_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01},
Parameter.DEVICE1_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0B, VALUE: 0x0B},
Parameter.DEVICE2_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.DEVICE2_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.DEVICE2_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D},
Parameter.DEVICE3_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.DEVICE3_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.DEVICE3_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D},
Parameter.PRESTART_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.PRESTART_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.PRESTART_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D},
Parameter.GLOBAL_CONFIGURATION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x07, VALUE: 0x07},
Parameter.PUMP_PULSE: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x10, VALUE: 0x10},
Parameter.PUMP_DURATION: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x20, VALUE: 0x20},
Parameter.SAMPLES_PER_MEASUREMENT: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0xFF, VALUE: 0xFF},
Parameter.CYCLES_BETWEEN_BLANKS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x54, VALUE: 0x54},
Parameter.NUMBER_REAGENT_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x18, VALUE: 0x18},
Parameter.NUMBER_BLANK_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x1C, VALUE: 0x1C},
Parameter.FLUSH_PUMP_INTERVAL: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01},
Parameter.BIT_SWITCHES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x38, VALUE: 0x38},
Parameter.EXTERNAL_PUMP_SETTINGS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x1E, VALUE: 0x1E},
Parameter.AUTO_SAMPLE_INTERVAL: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 3600, VALUE: 3600},
Parameter.EXTERNAL_PUMP_DELAY: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 360, VALUE: 360},
Parameter.REAGENT_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.DEIONIZED_WATER_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.PUMP_100ML_CYCLES: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
}
_sami_data_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 4)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x003B, 0x0019, 0x0962, 0x0155,
0x0730, 0x03E9, 0x08A1, 0x232D,
0x0043, 0x001A, 0x0962, 0x0154,
0x072F, 0x03EA], REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D92, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x065F, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x3B, REQUIRED: True}
}
_sami_blank_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 5)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x05, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x0040, 0x0019, 0x0962, 0x0680, 0x0730,
0x074C, 0x2CE0, 0x4274, 0x003B, 0x0018,
0x0961, 0x0680, 0x0732, 0x074E],
REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D82, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x0661, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x24, REQUIRED: True}
}
_dev1_sample_parameters = {
# Device 1 (external pump) Type 17 sample
Pco2wbDev1SampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x07, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x11, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91DE2, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0xCE, REQUIRED: True}
}
_configuration_parameters = {
# Configuration settings
Pco2wConfigurationDataParticleKey.LAUNCH_TIME: {TYPE: int, VALUE: 0xCEE90B00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.START_TIME_OFFSET: {TYPE: int, VALUE: 0x02C7EA00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.RECORDING_TIME: {TYPE: int, VALUE: 0x01E13380, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SAMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT1_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT1_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT2_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT2_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT3_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT3_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_SAMI: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_SAMI: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_SAMI: {TYPE: int, VALUE: 0x02, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE1: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_DEVICE1: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE1: {TYPE: int, VALUE: 0x0B, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE2: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_DEVICE2: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE2: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE3: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_DEVICE3: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE3: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_PRESTART: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_PRESTART: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_PRESTART: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2wConfigurationDataParticleKey.USE_BAUD_RATE_57600: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SEND_RECORD_TYPE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SEND_LIVE_RECORDS: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.EXTEND_GLOBAL_CONFIG: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PUMP_PULSE: {TYPE: int, VALUE: 0x10, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PUMP_DURATION: {TYPE: int, VALUE: 0x20, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SAMPLES_PER_MEASUREMENT: {TYPE: int, VALUE: 0xFF, REQUIRED: True},
Pco2wConfigurationDataParticleKey.CYCLES_BETWEEN_BLANKS: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wConfigurationDataParticleKey.NUMBER_REAGENT_CYCLES: {TYPE: int, VALUE: 0x18, REQUIRED: True},
Pco2wConfigurationDataParticleKey.NUMBER_BLANK_CYCLES: {TYPE: int, VALUE: 0x1C, REQUIRED: True},
Pco2wConfigurationDataParticleKey.FLUSH_PUMP_INTERVAL: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DISABLE_START_BLANK_FLUSH: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.MEASURE_AFTER_PUMP_PULSE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, VALUE: 0x38, REQUIRED: True},
Pco2wConfigurationDataParticleKey.EXTERNAL_PUMP_SETTINGS: {TYPE: int, VALUE: 0x1E, REQUIRED: True}
}
###
# Driver Parameter Methods
###
def assert_driver_parameters(self, current_parameters, verify_values=False):
"""
Verify that all driver parameters are correct and potentially verify
values.
@param current_parameters: driver parameters read from the driver
instance
@param verify_values: should we verify values against definition?
"""
self.assert_parameters(current_parameters, self._driver_parameters,
verify_values)
def assert_particle_sami_data_sample(self, data_particle, verify_values=False):
"""
Verify sami_data_sample particle (Type 4)
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 4, msg="Not a regular sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_data_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.SAMI_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._sami_data_sample_parameters,
verify_values)
def assert_particle_sami_blank_sample(self, data_particle, verify_values=False):
"""
Verify sami_blank_sample particle (Type 5)
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 5, msg="Not a blank sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_blank_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.SAMI_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._sami_blank_sample_parameters,
verify_values)
def assert_particle_dev1_sample(self, data_particle, verify_values=False):
"""
Verify dev1_sample particle
@param data_particle: Pco2wDev1SampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 17, msg="Not a device 1 sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wbDev1SampleDataParticleKey,
self._dev1_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PCO2W_B_DEV1_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._dev1_sample_parameters,
verify_values)
def assert_particle_configuration(self, data_particle, verify_values=False):
"""
Verify configuration particle
@param data_particle: Pco2wConfigurationDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(Pco2wConfigurationDataParticleKey,
self._configuration_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PCO2W_B_CONFIGURATION)
self.assert_data_particle_parameters(data_particle,
self._configuration_parameters,
verify_values)
###############################################################################
# UNIT TESTS #
# Unit Tests: test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(Pco2DriverUnitTest, DriverTestMixinSub):
capabilities_test_dict = {
ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.WAITING: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
'DRIVER_EVENT_SET',
'DRIVER_EVENT_START_DIRECT',
'DRIVER_EVENT_ACQUIRE_STATUS',
'DRIVER_EVENT_ACQUIRE_SAMPLE',
'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
'DRIVER_EVENT_START_AUTOSAMPLE',
'DRIVER_EVENT_DEIONIZED_WATER_FLUSH',
'DRIVER_EVENT_REAGENT_FLUSH',
'DRIVER_EVENT_DEIONIZED_WATER_FLUSH_100ML',
'DRIVER_EVENT_REAGENT_FLUSH_100ML',
'DRIVER_EVENT_RUN_EXTERNAL_PUMP'],
ProtocolState.DEIONIZED_WATER_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.REAGENT_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.DEIONIZED_WATER_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.REAGENT_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.RUN_EXTERNAL_PUMP: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_ACQUIRE_SAMPLE',
'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
'DRIVER_EVENT_STOP_AUTOSAMPLE',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.DIRECT_ACCESS: ['EXECUTE_DIRECT',
'DRIVER_EVENT_STOP_DIRECT'],
ProtocolState.POLLED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.POLLED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.SCHEDULED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.SCHEDULED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
}
def test_base_driver_enums(self):
"""
Verify that all the SAMI Instrument driver enumerations have no
duplicate values that might cause confusion. Also do a little
extra validation for the Capabilites
Extra enumeration tests are done in a specific subclass
"""
# Test Enums defined in the base SAMI driver
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
# Test capabilites for duplicates, then verify that capabilities
# is a subset of proto events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
def test_driver_enums(self):
"""
Verify that all driver enumeration has no duplicate values that might
cause confusion.
"""
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_has_no_duplicates(InstrumentCommand())
def test_chunker(self):
"""
Test the chunker and verify the particles created.
"""
chunker = StringChunker(Protocol.sieve_function)
for part in [self.VALID_STATUS_MESSAGE, self.VALID_CONTROL_RECORD, self.VALID_R0_BLANK_SAMPLE,
self.VALID_R0_DATA_SAMPLE, self.VALID_R1_SAMPLE, self.VALID_CONFIG_STRING]:
self.assert_chunker_sample(chunker, part)
self.assert_chunker_sample_with_noise(chunker, part)
self.assert_chunker_fragmented_sample(chunker, part)
self.assert_chunker_combined_sample(chunker, part)
def test_got_data(self):
"""
Verify sample data passed through the got data method produces the
correct data particles
"""
# Create and initialize the instrument driver with a mock port agent
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver)
self.assert_raw_particle_published(driver, True)
# Start validating data particles
self.assert_particle_published(driver, self.VALID_STATUS_MESSAGE,
self.assert_particle_regular_status, True)
self.assert_particle_published(driver, self.VALID_CONTROL_RECORD,
self.assert_particle_control_record, True)
self.assert_particle_published(driver, self.VALID_R0_BLANK_SAMPLE,
self.assert_particle_sami_blank_sample, True)
self.assert_particle_published(driver, self.VALID_R0_DATA_SAMPLE,
self.assert_particle_sami_data_sample, True)
self.assert_particle_published(driver, self.VALID_R1_SAMPLE,
self.assert_particle_dev1_sample, True)
self.assert_particle_published(driver, self.VALID_CONFIG_STRING,
self.assert_particle_configuration, True)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities. Iterate through available
capabilities, and verify that they can pass successfully through the
filter. Test silly made up capabilities to verify they are blocked by
filter.
"""
mock_callback = Mock()
protocol = Protocol(Prompt, SAMI_NEWLINE, mock_callback)
driver_capabilities = Capability().list()
test_capabilities = Capability().list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in
this dict must also be defined in the protocol FSM. Note, the EXIT and
ENTER DRIVER_EVENTS don't need to be listed here.
"""
# capabilities defined in base class test_driver.
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, self.capabilities_test_dict)
def test_pump_commands(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_pump_commands(driver)
def test_pump_timing(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_pump_timing(driver)
def test_waiting_discover(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_waiting_discover(driver)
def test_autosample_timing(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_autosample_timing(driver)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(Pco2DriverIntegrationTest, DriverTestMixinSub):
"""
Integration Tests:
test_startup_params: Verify that driver startup parameters are set properly.
test_set: In command state, test configuration particle generation.
Parameter.PUMP_PULSE
Parameter.PUMP_DURATION
Parameter.SAMPLES_PER_MEASUREMENT
Parameter.CYCLES_BETWEEN_BLANKS
Parameter.NUMBER_REAGENT_CYCLES
Parameter.NUMBER_BLANK_CYCLES
Parameter.FLUSH_PUMP_INTERVAL
Parameter.BIT_SWITCHES
Parameter.NUMBER_EXTRA_PUMP_CYCLES
Parameter.AUTO_SAMPLE_INTERVAL
Negative Set Tests:
START_TIME_FROM_LAUNCH
STOP_TIME_FROM_START
MODE_BITS
SAMI_SAMPLE_INTERVAL
test_commands: In autosample and command states, test particle generation.
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
ACQUIRE_BLANK_SAMPLE = ProtocolEvent.ACQUIRE_BLANK_SAMPLE
test_autosample: Test autosample particle generation.
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
test_scheduled_data: In command and autosample states
ACQUIRE_STATUS
"""
def test_startup_params(self):
startup_values = {
Parameter.PUMP_PULSE: 0x10,
Parameter.PUMP_DURATION: 0x20,
Parameter.SAMPLES_PER_MEASUREMENT: 0xFF,
Parameter.CYCLES_BETWEEN_BLANKS: 0x54,
Parameter.NUMBER_REAGENT_CYCLES: 0x18,
Parameter.NUMBER_BLANK_CYCLES: 0x1C,
Parameter.FLUSH_PUMP_INTERVAL: 0x01,
Parameter.BIT_SWITCHES: 0x01,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x38,
Parameter.EXTERNAL_PUMP_SETTINGS: 0x1E,
Parameter.EXTERNAL_PUMP_DELAY: 10,
Parameter.AUTO_SAMPLE_INTERVAL: 3600,
Parameter.REAGENT_FLUSH_DURATION: 0x08,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x08,
Parameter.PUMP_100ML_CYCLES: 1
}
new_values = {
Parameter.PUMP_PULSE: 0x11,
Parameter.PUMP_DURATION: 0x21,
Parameter.SAMPLES_PER_MEASUREMENT: 0xFA,
Parameter.CYCLES_BETWEEN_BLANKS: 0xA9,
Parameter.NUMBER_REAGENT_CYCLES: 0x19,
Parameter.NUMBER_BLANK_CYCLES: 0x1D,
Parameter.FLUSH_PUMP_INTERVAL: 0x02,
Parameter.BIT_SWITCHES: 0x02,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x39,
Parameter.EXTERNAL_PUMP_SETTINGS: 0x40,
Parameter.EXTERNAL_PUMP_DELAY: 300,
Parameter.AUTO_SAMPLE_INTERVAL: 600,
Parameter.REAGENT_FLUSH_DURATION: 0x01,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x0F,
Parameter.PUMP_100ML_CYCLES: 14
}
self.assert_initialize_driver()
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
self.assert_set_bulk(new_values)
self.driver_client.cmd_dvr('apply_startup_params')
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
def test_set(self):
self.assert_initialize_driver()
self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 77)
self.assert_set(Parameter.CYCLES_BETWEEN_BLANKS, 7)
self.assert_set(Parameter.PUMP_PULSE, 20)
self.assert_set(Parameter.SAMPLES_PER_MEASUREMENT, 239)
self.assert_set(Parameter.NUMBER_REAGENT_CYCLES, 26)
self.assert_set(Parameter.NUMBER_BLANK_CYCLES, 30)
self.assert_set(Parameter.FLUSH_PUMP_INTERVAL, 2)
self.assert_set(Parameter.BIT_SWITCHES, 1)
self.assert_set(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88)
self.assert_set(Parameter.EXTERNAL_PUMP_SETTINGS, 40)
self.assert_set(Parameter.EXTERNAL_PUMP_DELAY, 60)
self.assert_set(Parameter.REAGENT_FLUSH_DURATION, 16)
self.assert_set(Parameter.DEIONIZED_WATER_FLUSH_DURATION, 4)
self.assert_set(Parameter.PUMP_100ML_CYCLES, 14)
self.assert_set_readonly(Parameter.START_TIME_FROM_LAUNCH, 84600)
self.assert_set_readonly(Parameter.STOP_TIME_FROM_START, 84600)
self.assert_set_readonly(Parameter.MODE_BITS, 10)
self.assert_set_readonly(Parameter.SAMI_SAMPLE_INTERVAL, 1800)
def test_bulk_set(self):
self.assert_initialize_driver()
new_values = {
Parameter.AUTO_SAMPLE_INTERVAL: 77,
Parameter.CYCLES_BETWEEN_BLANKS: 7,
Parameter.PUMP_PULSE: 20,
Parameter.SAMPLES_PER_MEASUREMENT: 239,
Parameter.NUMBER_REAGENT_CYCLES: 26,
Parameter.NUMBER_BLANK_CYCLES: 30,
Parameter.FLUSH_PUMP_INTERVAL: 2,
Parameter.BIT_SWITCHES: 1,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 88,
Parameter.EXTERNAL_PUMP_SETTINGS: 40,
Parameter.EXTERNAL_PUMP_DELAY: 60,
Parameter.REAGENT_FLUSH_DURATION: 4,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 16,
Parameter.PUMP_100ML_CYCLES: 14
}
self.assert_set_bulk(new_values)
def test_bad_parameters(self):
self.assert_initialize_driver()
self.assert_set_exception(Parameter.CYCLES_BETWEEN_BLANKS, 7.0)
self.assert_set_exception(Parameter.PUMP_PULSE, 20.0)
self.assert_set_exception(Parameter.SAMPLES_PER_MEASUREMENT, 239.0)
self.assert_set_exception(Parameter.NUMBER_REAGENT_CYCLES, 26.0)
self.assert_set_exception(Parameter.NUMBER_BLANK_CYCLES, 30.0)
self.assert_set_exception(Parameter.FLUSH_PUMP_INTERVAL, 2.0)
self.assert_set_exception(Parameter.BIT_SWITCHES, 1.0)
self.assert_set_exception(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88.0)
self.assert_set_exception(Parameter.EXTERNAL_PUMP_SETTINGS, 40.0)
## EXTERNAL_PUMP_DELAY is set to 10 seconds in the startup_config. It defaults to 10 minutes
def test_external_pump_delay(self):
"""
Test delay between running of external pump and taking a sample
"""
max_sample_time = 15 # Maximum observed sample time with current configuration.
global dev1_sample
global data_sample
def get_dev1_sample(particle):
"""
Get dev1 sample
:param particle: dev1 sample particle
"""
global dev1_sample
dev1_sample = particle
def get_data_sample(particle):
"""
Get data sample
:param particle: data sample particle
"""
global data_sample
data_sample = particle
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, get_dev1_sample, timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, get_data_sample, timeout=180)
dev1_dict = self.get_data_particle_values_as_dict(dev1_sample)
sample_dict = self.get_data_particle_values_as_dict(data_sample)
dev1_time = dev1_dict.get(Pco2wbDev1SampleDataParticleKey.RECORD_TIME)
sample_time = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TIME)
time_diff = sample_time - dev1_time
self.assertTrue((time_diff > 10) and (time_diff < (10 + max_sample_time)),
"External pump delay %s is invalid" % time_diff)
self.assert_set(Parameter.EXTERNAL_PUMP_DELAY, 60)
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, get_dev1_sample, timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, get_data_sample, timeout=180)
dev1_dict = self.get_data_particle_values_as_dict(dev1_sample)
sample_dict = self.get_data_particle_values_as_dict(data_sample)
dev1_time = dev1_dict.get(Pco2wbDev1SampleDataParticleKey.RECORD_TIME)
sample_time = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TIME)
time_diff = sample_time - dev1_time
self.assertTrue((time_diff > 60) and (time_diff < (60 + max_sample_time)),
"External pump delay %s is invalid" % time_diff)
def test_acquire_sample(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
def test_acquire_blank_sample(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_blank_sample,
timeout=180)
def test_auto_sample(self):
self.assert_initialize_driver()
self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 80)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=4, timeout=400)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=4)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
self.clear_events()
#Now verify that no more particles get generated
failed = False
try:
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=240)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE,
self.assert_particle_dev1_sample)
failed = True
except AssertionError:
pass
self.assertFalse(failed)
#Restart autosample
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=4, timeout=400)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=4)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_polled_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE, state=ProtocolState.POLLED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
def test_polled_blank_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.POLLED_BLANK_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_blank_sample,
timeout=180)
def test_scheduled_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_scheduled_blank_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
self.clear_events()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.SCHEDULED_BLANK_SAMPLE,
delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_blank_sample,
timeout=180)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_scheduled_device_status_auto_sample(self):
"""
Verify the device status command can be triggered and run in autosample
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, delay=180)
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_CONFIGURATION,
self.assert_particle_configuration,
timeout=300)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_BATTERY_VOLTAGE,
self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_queued_command(self):
"""
Verify status is queued while samples are being taken
"""
self.assert_initialize_driver()
## Queue status
self.clear_events()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=1, timeout=220)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=1, timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_REGULAR_STATUS,
self.assert_particle_regular_status,
timeout=180)
self.assert_current_state(ProtocolState.COMMAND)
def test_queued_autosample(self):
"""
Verify status is queued while samples are being taken
"""
self.assert_initialize_driver()
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
## Queue status
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=1, timeout=220)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=1, timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_REGULAR_STATUS,
self.assert_particle_regular_status,
timeout=180)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_acquire_status(self):
self.assert_initialize_driver()
self.clear_events()
self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.PCO2W_B_REGULAR_STATUS,
self.assert_particle_regular_status)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_CONFIGURATION,
self.assert_particle_configuration)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_BATTERY_VOLTAGE,
self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
def test_scheduled_device_status_command(self):
"""
Verify the device status command can be triggered and run in command
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, delay=120)
self.clear_events()
self.assert_async_particle_generation(DataParticleType.PCO2W_B_CONFIGURATION,
self.assert_particle_configuration,
timeout=180)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_BATTERY_VOLTAGE,
self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
self.assert_current_state(ProtocolState.COMMAND)
def test_run_external_pump(self):
"""
Test running external pump and queueing status
"""
self.assert_initialize_driver()
self.clear_events()
self.assert_driver_command(ProtocolEvent.RUN_EXTERNAL_PUMP)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=20.0)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_REGULAR_STATUS,
self.assert_particle_regular_status,
timeout=20.0)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. The generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(Pco2DriverQualificationTest, DriverTestMixinSub):
@unittest.skip("Runs for several hours to test default autosample rate of 60 minutes")
def test_overnight(self):
"""
Verify autosample at default rate
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.BIT_SWITCHES, 0x00)
self.assert_set_parameter(Parameter.EXTERNAL_PUMP_DELAY, 360)
request_sample = time.time()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=120)
receive_dev1_sample = time.time()
dev1_sample_time = receive_dev1_sample - request_sample
self.assert_sample_async(self.assert_particle_sami_blank_sample, DataParticleType.SAMI_SAMPLE, timeout=800)
receive_sample = time.time()
sample_time = receive_sample - request_sample
log.debug("dev1_sample_time = %s", dev1_sample_time)
log.debug("sample_time = %s", sample_time)
self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE,
timeout=14400)
def test_direct_access_telnet_mode(self):
"""
@brief This test manually tests that the Instrument Driver properly
supports direct access to the physical instrument. (telnet mode)
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)
configuration_string = 'CF87945A02C7EA0001E133800A000E100402000E10010B0000000000000000000000000000000' + \
'71020FFA8181C0100383C00000000000000000000000000000000000000000000000000000000' + \
'00000000000000000000000000000000000000000000000000000000000000000000000000000' + \
'0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
self.assert_direct_access_start_telnet()
self.assertTrue(self.tcp_client)
# Erase memory
self.tcp_client.send_data("E5A%s" % SAMI_NEWLINE)
time.sleep(1)
# Load a new configuration string changing X to X
self.tcp_client.send_data("L5A%s" % SAMI_NEWLINE)
time.sleep(1)
self.tcp_client.send_data("%s00%s" % (configuration_string, SAMI_NEWLINE))
time.sleep(1)
# Check that configuration was changed
self.tcp_client.send_data("L%s" % SAMI_NEWLINE)
return_value = self.tcp_client.expect(configuration_string)
self.assertTrue(return_value)
###
# Add instrument specific code here.
###
self.assert_direct_access_stop_telnet()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_get_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)
def test_command_poll(self):
self.assert_enter_command_mode()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_blank_sample, DataParticleType.SAMI_SAMPLE, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.PCO2W_B_REGULAR_STATUS, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
DataParticleType.PCO2W_B_CONFIGURATION, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
DataParticleType.PCO2W_B_BATTERY_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
DataParticleType.PCO2W_B_THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.RUN_EXTERNAL_PUMP, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH, delay=15, agent_state=ResourceAgentState.COMMAND,
resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH_100ML, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
def test_autosample_poll(self):
self.assert_enter_command_mode()
self.assert_start_autosample(timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_blank_sample, DataParticleType.SAMI_SAMPLE, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.PCO2W_B_REGULAR_STATUS, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
DataParticleType.PCO2W_B_CONFIGURATION, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
DataParticleType.PCO2W_B_BATTERY_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
DataParticleType.PCO2W_B_THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
self.assert_stop_autosample()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
def test_autosample(self):
"""
Verify autosample works and data particles are created
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.AUTO_SAMPLE_INTERVAL, 80)
self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE)
def test_get_capabilities(self):
"""
@brief Verify that the correct capabilities are returned from get_capabilities
at various driver/agent states.
"""
self.assert_enter_command_mode()
##################
# Command Mode
##################
capabilities = {
AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
AgentCapabilityType.RESOURCE_COMMAND: [
ProtocolEvent.START_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.ACQUIRE_BLANK_SAMPLE,
ProtocolEvent.DEIONIZED_WATER_FLUSH,
ProtocolEvent.REAGENT_FLUSH,
ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML,
ProtocolEvent.REAGENT_FLUSH_100ML,
ProtocolEvent.RUN_EXTERNAL_PUMP
],
AgentCapabilityType.RESOURCE_INTERFACE: None,
AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
}
self.assert_capabilities(capabilities)
##################
# DA Mode
##################
da_capabilities = copy.deepcopy(capabilities)
da_capabilities[AgentCapabilityType.AGENT_COMMAND] = [ResourceAgentEvent.GO_COMMAND]
da_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
# Test direct access disconnect
self.assert_direct_access_start_telnet(timeout=10)
self.assertTrue(self.tcp_client)
self.assert_capabilities(da_capabilities)
self.tcp_client.disconnect()
# Now do it again, but use the event to stop DA
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_direct_access_start_telnet(timeout=10)
self.assert_capabilities(da_capabilities)
self.assert_direct_access_stop_telnet()
##################
# Command Mode
##################
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
##################
# Streaming Mode
##################
st_capabilities = copy.deepcopy(capabilities)
st_capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
st_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
ProtocolEvent.STOP_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.ACQUIRE_BLANK_SAMPLE
]
self.assert_start_autosample(timeout=200)
self.assert_capabilities(st_capabilities)
self.assert_stop_autosample()
##################
# Command Mode
##################
# We should be back in command mode from DA.
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
#######################
# Uninitialized Mode
#######################
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
self.assert_reset()
self.assert_capabilities(capabilities) |
import sys
import os
import time
with open(sys.argv[2], "rt") as file:
for line in file:
os.system("./scrape"+sys.argv[1]+" "+line)
time.sleep(1) #sleep between so you can quit and so it doesn't spam the site with requests. You can delete this line if you know the site won't mind, but beware you'll need to hold down control an c for each of the go programs because of the way that they run
Add in shebang for go-deeper.py.
#! /usr/bin/env python3
import sys
import os
import time
with open(sys.argv[2], "rt") as file:
for line in file:
os.system("./scrape"+sys.argv[1]+" "+line)
time.sleep(1) #sleep between so you can quit and so it doesn't spam the site with requests. You can delete this line if you know the site won't mind, but beware you'll need to hold down control an c for each of the go programs because of the way that they run
|
import pymongo
import re
from datetime import datetime
from bs4 import BeautifulSoup
from hracing.tools import cols_from_html_tbl
from hracing.tools import isnumber
from hracing.tools import bf4_text # checks if bf4 elements exist
from IPython.core.debugger import set_trace
def parse_racesheet(racesheet,forms,verbose = False):
""" Parse html in racesheet, get content, return race as hierarchical dict """
#Parse racesheet and get basic html segments
html = BeautifulSoup(racesheet.content,'html5lib')
race = _parse_race_level(html)
race["horses"] = _parse_horse_level(html,forms)
race["finish"] = _parse_finish(html)
return race
def _parse_race_level(html):
#Extract race-level data from html, return as dict
top_container = html.find('div',{'id':'racecardTopContainer'})
race_info_box = top_container.find('div',{'class':'racesList'})
race = {}
race['race_ID'] = int(top_container["data-raceid"])
n_starter_raw = bf4_text(race_info_box.find('li',{'class':'starter'}))
race['n_starter'] = int(n_starter_raw.split()[0]) if n_starter_raw else 0
dateraw = bf4_text(top_container.h1).split()[-1]
timeraw = bf4_text(race_info_box.find('li',{'class':'time'}))
race['race_date_time'] = datetime.strptime(dateraw + '_' + timeraw,'%d.%m.%Y_%H:%M')
country_raw = top_container.h2.a['href']
country = re.findall('/races\?country\s*=\s*(...)&date\s*=',country_raw)
race['country'] = country[0]
race['race_name'] = bf4_text(top_container.h2).strip()
distance = bf4_text(race_info_box.find('li',{'class':'distance'}))
race['distance'] = float(distance.split()[0]) if distance else float('nan')
race['ground']=bf4_text(race_info_box.find('li',{'class':'raceGround'}))
race['type_long'] = bf4_text(race_info_box.find('div',{'class':'raceTypeAllowance'}))
race['type_short'] = top_container.i["class"][1]
race['race_number'] = int(bf4_text(race_info_box('div',{'class':'counter'})[0]).strip())
stakes_raw = bf4_text(race_info_box.find('li',{'class':'stakes'}))
race['stakes'] = float(stakes_raw.split()[0]) if stakes_raw else float('nan')
race['currency'] = stakes_raw.split()[1] if stakes_raw else ""
return race
def _parse_horse_level(html,forms):
#Parse html for racecards, return a list of dicts with horse-level info
racecard = html.find('div',{'class':'racecardList'}) #single horse containers
horse_clearfixes = racecard.find_all('li',{'class','clearfix'})
raw_starter_no1 = html.find_all('span',{'class':'count1'})
raw_starter_no2 = html.find_all('span',{'class':'count2'})
raw_name = html.find_all('span',{'class':'runnername'})
raw_jockey = html.find_all('span',{'class':'jockeyname'})
raw_trainer = html.find_all('div',{'class':'jockeytrainer'})
statInfo = html.find_all('div',{'class':'statInfo'})
raw_weight = html.find_all('span',{'class':'weight'})
raw_age_and_sex = html.find_all('span',{'class':'horseage'})
raw_odd = html.find_all('span',{'class':'odds'})
horse_list = []
#Loop over all horses and store corresponding data in dict
for i,clearfix in enumerate(horse_clearfixes):
horse={}
horse['nonrunner'] = True if 'nonrunner' in horse_clearfixes[i]['class'] else False
horse['starter_no1'] =bf4_text(raw_starter_no1[i])
horse['starter_no2'] =bf4_text(raw_starter_no2[i])
horse['name'] = bf4_text(raw_name[i]).strip()
# Avoid writing horses without a name...
if horse['name'].strip():
horse['jockey'] = bf4_text(raw_jockey[i]).strip()
horse['trainer'] = bf4_text(raw_trainer[i]).strip()
heritage = statInfo[i].span.nextSibling
heritage = re.split('–',heritage)
horse['heritage1'] = heritage[0].strip()
horse['heritage2'] = heritage[1].strip()
horse['owner'] = statInfo[i].span.nextSibling.nextSibling.nextSibling
weight = bf4_text(raw_weight[i]).split() if raw_weight else ""
horse['weight'] = float(weight[0].strip().replace(',','.')) if weight else float('nan')
age_and_sex = bf4_text(raw_age_and_sex[i])
horse['age'] = float(age_and_sex.split('j. ')[0]) if age_and_sex else float('nan')
horse['sex'] = age_and_sex.split('j. ')[1] if age_and_sex else ""
odd_txt = bf4_text(raw_odd[i])
horse['odd'] = float(odd_txt.replace(',','.').replace('-','nan')) if odd_txt else float('nan')
horse['short_forms']=_extract_short_forms(clearfix.table)
horse['long_forms'] = _extract_long_forms(forms[i])
horse['long_forms'] = ""
else:
print('Warning: Horse with name: could not be parsed properly.')
horse = {}
horse_list.append(horse)
return horse_list
def _extract_short_forms(formen_table):
#Extract forms (prior race performance) from tables in racecards. If there are no prior formen, there are is no table...
if formen_table is not None:
currtable = cols_from_html_tbl(formen_table)
short_form={}
short_form['past_racedates'] = [datetime.strptime(i,'%d.%m.%y') for i in currtable[0]]
short_form['past_finishes'] = [float(i.strip('.')) if isnumber(i.strip('.'))
else float('nan') for i in currtable[1]]
short_form['past_race_courses'] = currtable[2]
short_form['past_distances'] = [float(i.strip(' m')) if isnumber(i.strip(' m'))
else float('nan') for i in currtable[3]]
short_form['past_stakes'] = [float(i) if isnumber(i)
else 'nan' for i in currtable[4]]
short_form['past_jockeys'] = currtable[5]
short_form['past_odds'] = [float(i.replace(',','.').replace('-','nan')) if isnumber(i)
else float('nan') for i in currtable[6]]
short_form['n_past_races'] = len(currtable[0])
else:
short_form={}
return short_form
def _extract_long_forms(form):
form_html = BeautifulSoup(form.content,'html5lib')
overview = form_html.find('section',{'id':'formguideOverview'})
form_main = form_html.find('section',{'id':'formguideForm'})
col = cols_from_html_tbl(form_main.table)
if col:
long_form={}
long_form['past_racedates'] = [datetime.strptime(i,'%d.%m.%Y') for i in col[0]]
long_form['past_race_courses'] = col[2]
long_form['past_finishes'] = [float(i.strip('.')) if isnumber(i.strip('.'))
else float('nan') for i in col[1]]
long_form['past_distances'] = [float(i.strip(' m')) if isnumber(i.strip(' m'))
else float('nan') for i in col[3]]
long_form['past_stakes'] = [float(i) if isnumber(i)
else 'nan' for i in col[4]]
long_form['past_jockeys'] = col[5]
long_form['past_odds'] = [float(i.replace(',','.').replace('-','nan')) if isnumber(i)
else float('nan') for i in col[6]]
long_form['n_past_races'] = len(col[0])
else:
long_form={}
return long_form
def _parse_finish(html):
#Parse html, return a list of dicts with finishers
finish_table = html.find('table',{'class':'finishTable'})
finish_list = []
if finish_table is not None:
finish_tbl = cols_from_html_tbl(finish_table)
for i,row in enumerate(finish_tbl[0]):
place = int(finish_tbl[0][i])
starter_no1 = finish_tbl[1][i]
name = finish_tbl[2][i]
odd = float(finish_tbl[3][i].replace(',','.').replace('-','NaN'))
jockey = finish_tbl[4][i]
info = finish_tbl[5][i]
finisher_out={"place" : place,
"starter_no1" : starter_no1,
"name" : name,
"odd" : odd,
"jockey" : jockey,
"info" : info
}
finish_list.append(finisher_out)
return finish_list
### TODO:
### 1.) ADD COMPARISON OF RACE_ID LIST WITH DB TO ALLOW CONTINUING DOWNLOADS
### 2.) PERFORM ONE COMPLETE IMPORT RUN ON NEW SITE
### 3.) DESCRIPTICE GRAPHS FOR PRESENTATION
### 3.) ADD FANCY GRAPHS TO DATA DESCRIPTION
### 5.) REFACTURE AND IMPELEMENT OLD PIPELINE SETUP AND ML
# Call mongoDB and dump race
def mongo_insert_race(race):
""" Take single race, add to local mongoDB, make race_ID index """
client = pymongo.MongoClient()
db = client.races
db.races.create_index([("race_ID", pymongo.ASCENDING)], unique=True)
results = db.races.insert_one(race)
Delete a STUPID mistake deleting long_formen just after being parsed.
import pymongo
import re
from datetime import datetime
from bs4 import BeautifulSoup
from hracing.tools import cols_from_html_tbl
from hracing.tools import isnumber
from hracing.tools import bf4_text # checks if bf4 elements exist
from IPython.core.debugger import set_trace
def parse_racesheet(racesheet,forms,verbose = False):
""" Parse html in racesheet, get content, return race as hierarchical dict """
#Parse racesheet and get basic html segments
html = BeautifulSoup(racesheet.content,'html5lib')
race = _parse_race_level(html)
race["horses"] = _parse_horse_level(html,forms)
race["finish"] = _parse_finish(html)
return race
def _parse_race_level(html):
#Extract race-level data from html, return as dict
top_container = html.find('div',{'id':'racecardTopContainer'})
race_info_box = top_container.find('div',{'class':'racesList'})
race = {}
race['race_ID'] = int(top_container["data-raceid"])
n_starter_raw = bf4_text(race_info_box.find('li',{'class':'starter'}))
race['n_starter'] = int(n_starter_raw.split()[0]) if n_starter_raw else 0
dateraw = bf4_text(top_container.h1).split()[-1]
timeraw = bf4_text(race_info_box.find('li',{'class':'time'}))
race['race_date_time'] = datetime.strptime(dateraw + '_' + timeraw,'%d.%m.%Y_%H:%M')
country_raw = top_container.h2.a['href']
country = re.findall('/races\?country\s*=\s*(...)&date\s*=',country_raw)
race['country'] = country[0]
race['race_name'] = bf4_text(top_container.h2).strip()
distance = bf4_text(race_info_box.find('li',{'class':'distance'}))
race['distance'] = float(distance.split()[0]) if distance else float('nan')
race['ground']=bf4_text(race_info_box.find('li',{'class':'raceGround'}))
race['type_long'] = bf4_text(race_info_box.find('div',{'class':'raceTypeAllowance'}))
race['type_short'] = top_container.i["class"][1]
race['race_number'] = int(bf4_text(race_info_box('div',{'class':'counter'})[0]).strip())
stakes_raw = bf4_text(race_info_box.find('li',{'class':'stakes'}))
race['stakes'] = float(stakes_raw.split()[0]) if stakes_raw else float('nan')
race['currency'] = stakes_raw.split()[1] if stakes_raw else ""
return race
def _parse_horse_level(html,forms):
#Parse html for racecards, return a list of dicts with horse-level info
racecard = html.find('div',{'class':'racecardList'}) #single horse containers
horse_clearfixes = racecard.find_all('li',{'class','clearfix'})
raw_starter_no1 = html.find_all('span',{'class':'count1'})
raw_starter_no2 = html.find_all('span',{'class':'count2'})
raw_name = html.find_all('span',{'class':'runnername'})
raw_jockey = html.find_all('span',{'class':'jockeyname'})
raw_trainer = html.find_all('div',{'class':'jockeytrainer'})
statInfo = html.find_all('div',{'class':'statInfo'})
raw_weight = html.find_all('span',{'class':'weight'})
raw_age_and_sex = html.find_all('span',{'class':'horseage'})
raw_odd = html.find_all('span',{'class':'odds'})
horse_list = []
#Loop over all horses and store corresponding data in dict
for i,clearfix in enumerate(horse_clearfixes):
horse={}
horse['nonrunner'] = True if 'nonrunner' in horse_clearfixes[i]['class'] else False
horse['starter_no1'] =bf4_text(raw_starter_no1[i])
horse['starter_no2'] =bf4_text(raw_starter_no2[i])
horse['name'] = bf4_text(raw_name[i]).strip()
# Avoid writing horses without a name...
if horse['name'].strip():
horse['jockey'] = bf4_text(raw_jockey[i]).strip()
horse['trainer'] = bf4_text(raw_trainer[i]).strip()
heritage = statInfo[i].span.nextSibling
heritage = re.split('–',heritage)
horse['heritage1'] = heritage[0].strip()
horse['heritage2'] = heritage[1].strip()
horse['owner'] = statInfo[i].span.nextSibling.nextSibling.nextSibling
weight = bf4_text(raw_weight[i]).split() if raw_weight else ""
horse['weight'] = float(weight[0].strip().replace(',','.')) if weight else float('nan')
age_and_sex = bf4_text(raw_age_and_sex[i])
horse['age'] = float(age_and_sex.split('j. ')[0]) if age_and_sex else float('nan')
horse['sex'] = age_and_sex.split('j. ')[1] if age_and_sex else ""
odd_txt = bf4_text(raw_odd[i])
horse['odd'] = float(odd_txt.replace(',','.').replace('-','nan')) if odd_txt else float('nan')
horse['short_forms']=_extract_short_forms(clearfix.table)
horse['long_forms'] = _extract_long_forms(forms[i])
else:
print('Warning: Horse with name: could not be parsed properly.')
horse = {}
horse_list.append(horse)
return horse_list
def _extract_short_forms(formen_table):
#Extract forms (prior race performance) from tables in racecards. If there are no prior formen, there are is no table...
if formen_table is not None:
currtable = cols_from_html_tbl(formen_table)
short_form={}
short_form['past_racedates'] = [datetime.strptime(i,'%d.%m.%y') for i in currtable[0]]
short_form['past_finishes'] = [float(i.strip('.')) if isnumber(i.strip('.'))
else float('nan') for i in currtable[1]]
short_form['past_race_courses'] = currtable[2]
short_form['past_distances'] = [float(i.strip(' m')) if isnumber(i.strip(' m'))
else float('nan') for i in currtable[3]]
short_form['past_stakes'] = [float(i) if isnumber(i)
else 'nan' for i in currtable[4]]
short_form['past_jockeys'] = currtable[5]
short_form['past_odds'] = [float(i.replace(',','.').replace('-','nan')) if isnumber(i)
else float('nan') for i in currtable[6]]
short_form['n_past_races'] = len(currtable[0])
else:
short_form={}
return short_form
def _extract_long_forms(form):
form_html = BeautifulSoup(form.content,'html5lib')
overview = form_html.find('section',{'id':'formguideOverview'})
form_main = form_html.find('section',{'id':'formguideForm'})
col = cols_from_html_tbl(form_main.table)
if col:
long_form={}
long_form['past_racedates'] = [datetime.strptime(i,'%d.%m.%Y') for i in col[0]]
long_form['past_race_courses'] = col[2]
long_form['past_finishes'] = [float(i.strip('.')) if isnumber(i.strip('.'))
else float('nan') for i in col[1]]
long_form['past_distances'] = [float(i.strip(' m')) if isnumber(i.strip(' m'))
else float('nan') for i in col[3]]
long_form['past_stakes'] = [float(i) if isnumber(i)
else 'nan' for i in col[4]]
long_form['past_jockeys'] = col[5]
long_form['past_odds'] = [float(i.replace(',','.').replace('-','nan')) if isnumber(i)
else float('nan') for i in col[6]]
long_form['n_past_races'] = len(col[0])
else:
long_form={}
return long_form
def _parse_finish(html):
#Parse html, return a list of dicts with finishers
finish_table = html.find('table',{'class':'finishTable'})
finish_list = []
if finish_table is not None:
finish_tbl = cols_from_html_tbl(finish_table)
for i,row in enumerate(finish_tbl[0]):
place = int(finish_tbl[0][i])
starter_no1 = finish_tbl[1][i]
name = finish_tbl[2][i]
odd = float(finish_tbl[3][i].replace(',','.').replace('-','NaN'))
jockey = finish_tbl[4][i]
info = finish_tbl[5][i]
finisher_out={"place" : place,
"starter_no1" : starter_no1,
"name" : name,
"odd" : odd,
"jockey" : jockey,
"info" : info
}
finish_list.append(finisher_out)
return finish_list
### TODO:
### 1.) Function for creating pandas df from mongoDB
### 2.)
### 3.) DESCRIPTICE GRAPHS FOR PRESENTATION
### 3.) ADD FANCY GRAPHS TO DATA DESCRIPTION
### 5.) REFACTURE AND IMPELEMENT OLD PIPELINE SETUP AND ML
# Call mongoDB and dump race
def mongo_insert_race(race):
""" Take single race, add to local mongoDB, make race_ID index """
client = pymongo.MongoClient()
db = client.races
db.races.create_index([("race_ID", pymongo.ASCENDING)], unique=True)
results = db.races.insert_one(race)
|
import tempfile, os, sys
from model import Interface
import traceback
download_starting = "starting" # Waiting for UI to start it
download_fetching = "fetching" # In progress
download_checking = "checking" # Checking GPG sig (possibly interactive)
download_complete = "complete" # Downloaded and cached OK
download_failed = "failed"
downloads = {} # URL -> Download
class DownloadError(Exception):
pass
class Download:
url = None
tempfile = None # Stream for result
status = None # download_*
interface = None
errors = None
child_pid = None
child_stderr = None
def __init__(self, interface, url = None):
"Initial status is starting."
assert isinstance(interface, Interface)
self.url = url or interface.uri
self.status = download_starting
self.interface = interface
def start(self):
"""Returns stderr stream from child. Call error_stream_closed() when
it returns EOF."""
assert self.status == download_starting
self.tempfile = tempfile.TemporaryFile(prefix = 'injector-dl-data-')
error_r, error_w = os.pipe()
self.errors = ''
self.child_pid = os.fork()
if self.child_pid == 0:
# We are the child
try:
os.close(error_r)
os.dup2(error_w, 2)
os.close(error_w)
self.download_as_child()
finally:
os._exit(1)
# We are the parent
os.close(error_w)
self.status = download_fetching
return os.fdopen(error_r, 'r')
def download_as_child(self):
import time
try:
print "Child downloading", self.url
#time.sleep(1)
if not os.path.isfile(self.url):
print >>sys.stderr, "File '%s' does not " \
"exist!" % self.url
return
import shutil
shutil.copyfileobj(file(self.url), self.tempfile)
self.tempfile.flush()
#print "Done :-)"
os._exit(0)
except:
traceback.print_exc()
def error_stream_data(self, data):
"""Passed with result of os.read(error_stream, n). Can be
called multiple times, once for each read."""
assert data
assert self.status is download_fetching
self.errors += data
def error_stream_closed(self):
"""Ends a download. Status changes from fetching to checking.
Returns data stream."""
assert self.status is download_fetching
assert self.tempfile is not None
assert self.child_pid is not None
pid, status = os.waitpid(self.child_pid, 0)
assert pid == self.child_pid
self.child_pid = None
errors = self.errors
self.errors = None
if status and not errors:
errors = 'Download process exited with error status ' \
'code 0x' + hex(status)
stream = self.tempfile
self.tempfile = None
if errors:
self.status = download_failed
raise DownloadError(errors)
else:
self.status = download_checking
stream.seek(0)
return stream
def abort(self):
if self.child_pid is not None:
print "Killing download process", self.child_pid
import signal
os.kill(self.child_pid, signal.SIGTERM)
else:
self.status = download_failed
def begin_download(interface, force):
"""Start downloading interface.
If a Download object already exists (any state; in progress, failed or
completed) and force is False, does nothing and returns None.
If force is True, any existing download is destroyed and a new one created."""
dl = downloads.get(interface.uri, None)
if dl:
if force:
dl.abort()
del downloads[interface.uri]
else:
return None # Already downloading
print "Creating new Download(%s)" % interface.uri
# Create new download
dl = Download(interface)
downloads[interface.uri] = dl
assert dl.status == download_starting
return dl
Publish command updates interfaces.
git-svn-id: 2f953839656920094b02ff4c102876eed7a2c24b@77 9f8c893c-44ee-0310-b757-c8ca8341c71e
import tempfile, os, sys
from model import Interface
import traceback
download_starting = "starting" # Waiting for UI to start it
download_fetching = "fetching" # In progress
download_checking = "checking" # Checking GPG sig (possibly interactive)
download_complete = "complete" # Downloaded and cached OK
download_failed = "failed"
downloads = {} # URL -> Download
class DownloadError(Exception):
pass
class Download:
url = None
tempfile = None # Stream for result
status = None # download_*
interface = None
errors = None
child_pid = None
child_stderr = None
def __init__(self, interface, url = None):
"Initial status is starting."
assert isinstance(interface, Interface)
self.url = url or interface.uri
self.status = download_starting
self.interface = interface
def start(self):
"""Returns stderr stream from child. Call error_stream_closed() when
it returns EOF."""
assert self.status == download_starting
self.tempfile = tempfile.TemporaryFile(prefix = 'injector-dl-data-')
error_r, error_w = os.pipe()
self.errors = ''
self.child_pid = os.fork()
if self.child_pid == 0:
# We are the child
try:
os.close(error_r)
os.dup2(error_w, 2)
os.close(error_w)
self.download_as_child()
finally:
os._exit(1)
# We are the parent
os.close(error_w)
self.status = download_fetching
return os.fdopen(error_r, 'r')
def download_as_child(self):
import time
try:
print "Child downloading", self.url
#time.sleep(1)
if not os.path.isfile(self.url):
print >>sys.stderr, "File '%s' does not " \
"exist!" % self.url
return
import shutil
shutil.copyfileobj(file(self.url), self.tempfile)
self.tempfile.flush()
#print "Done :-)"
os._exit(0)
except:
traceback.print_exc()
def error_stream_data(self, data):
"""Passed with result of os.read(error_stream, n). Can be
called multiple times, once for each read."""
assert data
assert self.status is download_fetching
self.errors += data
def error_stream_closed(self):
"""Ends a download. Status changes from fetching to checking.
Returns data stream."""
assert self.status is download_fetching
assert self.tempfile is not None
assert self.child_pid is not None
pid, status = os.waitpid(self.child_pid, 0)
assert pid == self.child_pid
self.child_pid = None
errors = self.errors
self.errors = None
if status and not errors:
errors = 'Download process exited with error status ' \
'code 0x' + hex(status)
stream = self.tempfile
self.tempfile = None
if errors:
self.status = download_failed
raise DownloadError(errors)
else:
self.status = download_checking
stream.seek(0)
return stream
def abort(self):
if self.child_pid is not None:
print "Killing download process", self.child_pid
import signal
os.kill(self.child_pid, signal.SIGTERM)
else:
self.status = download_failed
def begin_download(interface, force):
"""Start downloading interface.
If a Download object already exists (any state; in progress, failed or
completed) and force is False, does nothing and returns None.
If force is True, any existing download is destroyed and a new one created."""
dl = downloads.get(interface.uri, None)
if dl:
if force:
dl.abort()
del downloads[interface.uri]
else:
return None # Already downloading
#print "Creating new Download(%s)" % interface.uri
# Create new download
dl = Download(interface)
downloads[interface.uri] = dl
assert dl.status == download_starting
return dl
|
#!/usr/bin/env python
# Copyright 2012 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Performs per host Linux Bridge configuration for Neutron.
# Based on the structure of the OpenVSwitch agent in the
# Neutron OpenVSwitch Plugin.
import sys
import netaddr
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from oslo_utils import excutils
from six import moves
from neutron._i18n import _LE, _LI, _LW
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import exceptions
from neutron.common import profiler as setup_profiler
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as p_utils
from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb
from neutron.plugins.ml2.drivers.agent import _common_agent as ca
from neutron.plugins.ml2.drivers.agent import config as cagt_config # noqa
from neutron.plugins.ml2.drivers.l2pop.rpc_manager \
import l2population_rpc as l2pop_rpc
from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config # noqa
from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
import constants as lconst
LOG = logging.getLogger(__name__)
LB_AGENT_BINARY = 'neutron-linuxbridge-agent'
BRIDGE_NAME_PREFIX = "brq"
MAX_VLAN_POSTFIX_LEN = 5
VXLAN_INTERFACE_PREFIX = "vxlan-"
class LinuxBridgeManager(amb.CommonAgentManagerBase):
def __init__(self, bridge_mappings, interface_mappings):
super(LinuxBridgeManager, self).__init__()
self.bridge_mappings = bridge_mappings
self.interface_mappings = interface_mappings
self.validate_interface_mappings()
self.validate_bridge_mappings()
self.ip = ip_lib.IPWrapper()
# VXLAN related parameters:
self.local_ip = cfg.CONF.VXLAN.local_ip
self.vxlan_mode = lconst.VXLAN_NONE
if cfg.CONF.VXLAN.enable_vxlan:
device = self.get_local_ip_device()
self.validate_vxlan_group_with_local_ip()
self.local_int = device.name
self.check_vxlan_support()
def validate_interface_mappings(self):
for physnet, interface in self.interface_mappings.items():
if not ip_lib.device_exists(interface):
LOG.error(_LE("Interface %(intf)s for physical network %(net)s"
" does not exist. Agent terminated!"),
{'intf': interface, 'net': physnet})
sys.exit(1)
def validate_bridge_mappings(self):
for physnet, bridge in self.bridge_mappings.items():
if not ip_lib.device_exists(bridge):
LOG.error(_LE("Bridge %(brq)s for physical network %(net)s"
" does not exist. Agent terminated!"),
{'brq': bridge, 'net': physnet})
sys.exit(1)
def validate_vxlan_group_with_local_ip(self):
if not cfg.CONF.VXLAN.vxlan_group:
return
try:
ip_addr = netaddr.IPAddress(self.local_ip)
# Ensure the configured group address/range is valid and multicast
group_net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group)
if not group_net.is_multicast():
raise ValueError()
if not ip_addr.version == group_net.version:
raise ValueError()
except (netaddr.core.AddrFormatError, ValueError):
LOG.error(_LE("Invalid VXLAN Group: %(group)s, must be an address "
"or network (in CIDR notation) in a multicast "
"range of the same address family as local_ip: "
"%(ip)s"),
{'group': cfg.CONF.VXLAN.vxlan_group,
'ip': self.local_ip})
sys.exit(1)
def get_local_ip_device(self):
"""Return the device with local_ip on the host."""
device = self.ip.get_device_by_ip(self.local_ip)
if not device:
LOG.error(_LE("Tunneling cannot be enabled without the local_ip "
"bound to an interface on the host. Please "
"configure local_ip %s on the host interface to "
"be used for tunneling and restart the agent."),
self.local_ip)
sys.exit(1)
return device
def get_existing_bridge_name(self, physical_network):
if not physical_network:
return None
return self.bridge_mappings.get(physical_network)
@staticmethod
def get_bridge_name(network_id):
if not network_id:
LOG.warning(_LW("Invalid Network ID, will lead to incorrect "
"bridge name"))
bridge_name = BRIDGE_NAME_PREFIX + \
network_id[:lconst.RESOURCE_ID_LENGTH]
return bridge_name
def get_subinterface_name(self, physical_interface, vlan_id):
if not vlan_id:
LOG.warning(_LW("Invalid VLAN ID, will lead to incorrect "
"subinterface name"))
vlan_postfix = '.%s' % vlan_id
# For the vlan subinterface name prefix we use:
# * the physical_interface, if len(physical_interface) +
# len(vlan_postifx) <= 15 for backward compatibility reasons
# Example: physical_interface = eth0
# prefix = eth0.1
# prefix = eth0.1111
#
# * otherwise a unique hash per physical_interface to help debugging
# Example: physical_interface = long_interface
# prefix = longHASHED.1
# prefix = longHASHED.1111
#
# Remark: For some physical_interface values, the used prefix can be
# both, the physical_interface itself or a hash, depending
# on the vlan_postfix length.
# Example: physical_interface = mix_interface
# prefix = mix_interface.1 (backward compatible)
# prefix = mix_iHASHED.1111
if (len(physical_interface) + len(vlan_postfix) >
constants.DEVICE_NAME_MAX_LEN):
physical_interface = p_utils.get_interface_name(
physical_interface, max_len=(constants.DEVICE_NAME_MAX_LEN -
MAX_VLAN_POSTFIX_LEN))
return "%s%s" % (physical_interface, vlan_postfix)
@staticmethod
def get_tap_device_name(interface_id):
if not interface_id:
LOG.warning(_LW("Invalid Interface ID, will lead to incorrect "
"tap device name"))
tap_device_name = constants.TAP_DEVICE_PREFIX + \
interface_id[:lconst.RESOURCE_ID_LENGTH]
return tap_device_name
def get_vxlan_device_name(self, segmentation_id):
if 0 <= int(segmentation_id) <= p_const.MAX_VXLAN_VNI:
return VXLAN_INTERFACE_PREFIX + str(segmentation_id)
else:
LOG.warning(_LW("Invalid Segmentation ID: %s, will lead to "
"incorrect vxlan device name"), segmentation_id)
def get_vxlan_group(self, segmentation_id):
net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group)
# Map the segmentation ID to (one of) the group address(es)
return str(net.network +
(int(segmentation_id) & int(net.hostmask)))
def get_deletable_bridges(self):
bridge_list = bridge_lib.get_bridge_names()
bridges = {b for b in bridge_list if b.startswith(BRIDGE_NAME_PREFIX)}
bridges.difference_update(self.bridge_mappings.values())
return bridges
def get_tap_devices_count(self, bridge_name):
if_list = bridge_lib.BridgeDevice(bridge_name).get_interfaces()
return len([interface for interface in if_list if
interface.startswith(constants.TAP_DEVICE_PREFIX)])
def ensure_vlan_bridge(self, network_id, phy_bridge_name,
physical_interface, vlan_id):
"""Create a vlan and bridge unless they already exist."""
interface = self.ensure_vlan(physical_interface, vlan_id)
if phy_bridge_name:
return self.ensure_bridge(phy_bridge_name)
else:
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(interface)
if self.ensure_bridge(bridge_name, interface, ips, gateway):
return interface
def ensure_vxlan_bridge(self, network_id, segmentation_id):
"""Create a vxlan and bridge unless they already exist."""
interface = self.ensure_vxlan(segmentation_id)
if not interface:
LOG.error(_LE("Failed creating vxlan interface for "
"%(segmentation_id)s"),
{segmentation_id: segmentation_id})
return
bridge_name = self.get_bridge_name(network_id)
self.ensure_bridge(bridge_name, interface)
return interface
def get_interface_details(self, interface):
device = self.ip.device(interface)
ips = device.addr.list(scope='global')
# Update default gateway if necessary
gateway = device.route.get_gateway(scope='global')
return ips, gateway
def ensure_flat_bridge(self, network_id, phy_bridge_name,
physical_interface):
"""Create a non-vlan bridge unless it already exists."""
if phy_bridge_name:
return self.ensure_bridge(phy_bridge_name)
else:
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(physical_interface)
if self.ensure_bridge(bridge_name, physical_interface, ips,
gateway):
return physical_interface
def ensure_local_bridge(self, network_id, phy_bridge_name):
"""Create a local bridge unless it already exists."""
if phy_bridge_name:
bridge_name = phy_bridge_name
else:
bridge_name = self.get_bridge_name(network_id)
return self.ensure_bridge(bridge_name)
def ensure_vlan(self, physical_interface, vlan_id):
"""Create a vlan unless it already exists."""
interface = self.get_subinterface_name(physical_interface, vlan_id)
if not ip_lib.device_exists(interface):
LOG.debug("Creating subinterface %(interface)s for "
"VLAN %(vlan_id)s on interface "
"%(physical_interface)s",
{'interface': interface, 'vlan_id': vlan_id,
'physical_interface': physical_interface})
try:
int_vlan = self.ip.add_vlan(interface, physical_interface,
vlan_id)
except RuntimeError:
with excutils.save_and_reraise_exception() as ctxt:
if ip_lib.vlan_in_use(vlan_id):
ctxt.reraise = False
LOG.error(_LE("Unable to create VLAN interface for "
"VLAN ID %s because it is in use by "
"another interface."), vlan_id)
return
int_vlan.disable_ipv6()
int_vlan.link.set_up()
LOG.debug("Done creating subinterface %s", interface)
return interface
def ensure_vxlan(self, segmentation_id):
"""Create a vxlan unless it already exists."""
interface = self.get_vxlan_device_name(segmentation_id)
if not ip_lib.device_exists(interface):
LOG.debug("Creating vxlan interface %(interface)s for "
"VNI %(segmentation_id)s",
{'interface': interface,
'segmentation_id': segmentation_id})
args = {'dev': self.local_int}
if self.vxlan_mode == lconst.VXLAN_MCAST:
args['group'] = self.get_vxlan_group(segmentation_id)
if cfg.CONF.VXLAN.ttl:
args['ttl'] = cfg.CONF.VXLAN.ttl
if cfg.CONF.VXLAN.tos:
args['tos'] = cfg.CONF.VXLAN.tos
if cfg.CONF.VXLAN.l2_population:
args['proxy'] = cfg.CONF.VXLAN.arp_responder
try:
int_vxlan = self.ip.add_vxlan(interface, segmentation_id,
**args)
except RuntimeError:
with excutils.save_and_reraise_exception() as ctxt:
# perform this check after an attempt rather than before
# to avoid excessive lookups and a possible race condition.
if ip_lib.vxlan_in_use(segmentation_id):
ctxt.reraise = False
LOG.error(_LE("Unable to create VXLAN interface for "
"VNI %s because it is in use by another "
"interface."), segmentation_id)
return None
int_vxlan.disable_ipv6()
int_vxlan.link.set_up()
LOG.debug("Done creating vxlan interface %s", interface)
return interface
def update_interface_ip_details(self, destination, source, ips,
gateway):
if ips or gateway:
dst_device = self.ip.device(destination)
src_device = self.ip.device(source)
# Append IP's to bridge if necessary
if ips:
for ip in ips:
dst_device.addr.add(cidr=ip['cidr'])
if gateway:
# Ensure that the gateway can be updated by changing the metric
metric = 100
if 'metric' in gateway:
metric = gateway['metric'] - 1
dst_device.route.add_gateway(gateway=gateway['gateway'],
metric=metric)
src_device.route.delete_gateway(gateway=gateway['gateway'])
# Remove IP's from interface
if ips:
for ip in ips:
src_device.addr.delete(cidr=ip['cidr'])
def _bridge_exists_and_ensure_up(self, bridge_name):
"""Check if the bridge exists and make sure it is up."""
br = ip_lib.IPDevice(bridge_name)
br.set_log_fail_as_error(False)
try:
# If the device doesn't exist this will throw a RuntimeError
br.link.set_up()
except RuntimeError:
return False
return True
def ensure_bridge(self, bridge_name, interface=None, ips=None,
gateway=None):
"""Create a bridge unless it already exists."""
# _bridge_exists_and_ensure_up instead of device_exists is used here
# because there are cases where the bridge exists but it's not UP,
# for example:
# 1) A greenthread was executing this function and had not yet executed
# "ip link set bridge_name up" before eventlet switched to this
# thread running the same function
# 2) The Nova VIF driver was running concurrently and had just created
# the bridge, but had not yet put it UP
if not self._bridge_exists_and_ensure_up(bridge_name):
LOG.debug("Starting bridge %(bridge_name)s for subinterface "
"%(interface)s",
{'bridge_name': bridge_name, 'interface': interface})
bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name)
if bridge_device.setfd(0):
return
if bridge_device.disable_stp():
return
if bridge_device.disable_ipv6():
return
if bridge_device.link.set_up():
return
LOG.debug("Done starting bridge %(bridge_name)s for "
"subinterface %(interface)s",
{'bridge_name': bridge_name, 'interface': interface})
else:
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if not interface:
return bridge_name
# Update IP info if necessary
self.update_interface_ip_details(bridge_name, interface, ips, gateway)
# Check if the interface is part of the bridge
if not bridge_device.owns_interface(interface):
try:
# Check if the interface is not enslaved in another bridge
bridge = bridge_lib.BridgeDevice.get_interface_bridge(
interface)
if bridge:
bridge.delif(interface)
bridge_device.addif(interface)
except Exception as e:
LOG.error(_LE("Unable to add %(interface)s to %(bridge_name)s"
"! Exception: %(e)s"),
{'interface': interface, 'bridge_name': bridge_name,
'e': e})
return
return bridge_name
def ensure_physical_in_bridge(self, network_id,
network_type,
physical_network,
segmentation_id):
if network_type == p_const.TYPE_VXLAN:
if self.vxlan_mode == lconst.VXLAN_NONE:
LOG.error(_LE("Unable to add vxlan interface for network %s"),
network_id)
return
return self.ensure_vxlan_bridge(network_id, segmentation_id)
# NOTE(nick-ma-z): Obtain mappings of physical bridge and interfaces
physical_bridge = self.get_existing_bridge_name(physical_network)
physical_interface = self.interface_mappings.get(physical_network)
if not physical_bridge and not physical_interface:
LOG.error(_LE("No bridge or interface mappings"
" for physical network %s"),
physical_network)
return
if network_type == p_const.TYPE_FLAT:
return self.ensure_flat_bridge(network_id, physical_bridge,
physical_interface)
elif network_type == p_const.TYPE_VLAN:
return self.ensure_vlan_bridge(network_id, physical_bridge,
physical_interface,
segmentation_id)
else:
LOG.error(_LE("Unknown network_type %(network_type)s for network "
"%(network_id)s."), {network_type: network_type,
network_id: network_id})
def add_tap_interface(self, network_id, network_type, physical_network,
segmentation_id, tap_device_name, device_owner):
"""Add tap interface and handle interface missing exeptions."""
try:
return self._add_tap_interface(network_id, network_type,
physical_network, segmentation_id,
tap_device_name, device_owner)
except Exception:
with excutils.save_and_reraise_exception() as ctx:
if not ip_lib.device_exists(tap_device_name):
# the exception was likely a side effect of the tap device
# being removed during handling so we just return false
# like we would if it didn't exist to begin with.
ctx.reraise = False
return False
def _add_tap_interface(self, network_id, network_type, physical_network,
segmentation_id, tap_device_name, device_owner):
"""Add tap interface.
If a VIF has been plugged into a network, this function will
add the corresponding tap device to the relevant bridge.
"""
if not ip_lib.device_exists(tap_device_name):
LOG.debug("Tap device: %s does not exist on "
"this host, skipped", tap_device_name)
return False
bridge_name = self.get_existing_bridge_name(physical_network)
if not bridge_name:
bridge_name = self.get_bridge_name(network_id)
if network_type == p_const.TYPE_LOCAL:
self.ensure_local_bridge(network_id, bridge_name)
else:
phy_dev_name = self.ensure_physical_in_bridge(network_id,
network_type,
physical_network,
segmentation_id)
if not phy_dev_name:
return False
self.ensure_tap_mtu(tap_device_name, phy_dev_name)
# Avoid messing with plugging devices into a bridge that the agent
# does not own
if device_owner.startswith(constants.DEVICE_OWNER_PREFIXES):
# Check if device needs to be added to bridge
if not bridge_lib.BridgeDevice.get_interface_bridge(
tap_device_name):
data = {'tap_device_name': tap_device_name,
'bridge_name': bridge_name}
LOG.debug("Adding device %(tap_device_name)s to bridge "
"%(bridge_name)s", data)
if bridge_lib.BridgeDevice(bridge_name).addif(tap_device_name):
return False
else:
data = {'tap_device_name': tap_device_name,
'device_owner': device_owner,
'bridge_name': bridge_name}
LOG.debug("Skip adding device %(tap_device_name)s to "
"%(bridge_name)s. It is owned by %(device_owner)s and "
"thus added elsewhere.", data)
return True
def ensure_tap_mtu(self, tap_dev_name, phy_dev_name):
"""Ensure the MTU on the tap is the same as the physical device."""
phy_dev_mtu = ip_lib.IPDevice(phy_dev_name).link.mtu
ip_lib.IPDevice(tap_dev_name).link.set_mtu(phy_dev_mtu)
def plug_interface(self, network_id, network_segment, tap_name,
device_owner):
return self.add_tap_interface(network_id, network_segment.network_type,
network_segment.physical_network,
network_segment.segmentation_id,
tap_name, device_owner)
def delete_bridge(self, bridge_name):
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if bridge_device.exists():
physical_interfaces = set(self.interface_mappings.values())
interfaces_on_bridge = bridge_device.get_interfaces()
for interface in interfaces_on_bridge:
self.remove_interface(bridge_name, interface)
if interface.startswith(VXLAN_INTERFACE_PREFIX):
self.delete_interface(interface)
else:
# Match the vlan/flat interface in the bridge.
# If the bridge has an IP, it mean that this IP was moved
# from the current interface, which also mean that this
# interface was not created by the agent.
ips, gateway = self.get_interface_details(bridge_name)
if ips:
self.update_interface_ip_details(interface,
bridge_name,
ips, gateway)
elif interface not in physical_interfaces:
self.delete_interface(interface)
try:
LOG.debug("Deleting bridge %s", bridge_name)
if bridge_device.link.set_down():
return
if bridge_device.delbr():
return
LOG.debug("Done deleting bridge %s", bridge_name)
except RuntimeError:
with excutils.save_and_reraise_exception() as ctxt:
if not bridge_device.exists():
# the exception was likely a side effect of the bridge
# being removed by nova during handling,
# so we just return
ctxt.reraise = False
LOG.debug("Cannot delete bridge %s; it does not exist",
bridge_name)
return
else:
LOG.debug("Cannot delete bridge %s; it does not exist",
bridge_name)
def remove_interface(self, bridge_name, interface_name):
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if bridge_device.exists():
if not bridge_lib.is_bridged_interface(interface_name):
return True
LOG.debug("Removing device %(interface_name)s from bridge "
"%(bridge_name)s",
{'interface_name': interface_name,
'bridge_name': bridge_name})
if bridge_device.delif(interface_name):
return False
LOG.debug("Done removing device %(interface_name)s from bridge "
"%(bridge_name)s",
{'interface_name': interface_name,
'bridge_name': bridge_name})
return True
else:
LOG.debug("Cannot remove device %(interface_name)s bridge "
"%(bridge_name)s does not exist",
{'interface_name': interface_name,
'bridge_name': bridge_name})
return False
def delete_interface(self, interface):
device = self.ip.device(interface)
if device.exists():
LOG.debug("Deleting interface %s",
interface)
device.link.set_down()
device.link.delete()
LOG.debug("Done deleting interface %s", interface)
def get_devices_modified_timestamps(self, devices):
return {d: bridge_lib.get_interface_bridged_time(d) for d in devices}
def get_all_devices(self):
devices = set()
for device in bridge_lib.get_bridge_names():
if device.startswith(constants.TAP_DEVICE_PREFIX):
devices.add(device)
return devices
def vxlan_ucast_supported(self):
if not cfg.CONF.VXLAN.l2_population:
return False
if not ip_lib.iproute_arg_supported(
['bridge', 'fdb'], 'append'):
LOG.warning(_LW('Option "%(option)s" must be supported by command '
'"%(command)s" to enable %(mode)s mode'),
{'option': 'append',
'command': 'bridge fdb',
'mode': 'VXLAN UCAST'})
return False
test_iface = None
for seg_id in moves.range(1, p_const.MAX_VXLAN_VNI + 1):
if (ip_lib.device_exists(self.get_vxlan_device_name(seg_id))
or ip_lib.vxlan_in_use(seg_id)):
continue
test_iface = self.ensure_vxlan(seg_id)
break
else:
LOG.error(_LE('No valid Segmentation ID to perform UCAST test.'))
return False
try:
utils.execute(
cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0],
'dev', test_iface, 'dst', '1.1.1.1'],
run_as_root=True, log_fail_as_error=False)
return True
except RuntimeError:
return False
finally:
self.delete_interface(test_iface)
def vxlan_mcast_supported(self):
if not cfg.CONF.VXLAN.vxlan_group:
LOG.warning(_LW('VXLAN muticast group(s) must be provided in '
'vxlan_group option to enable VXLAN MCAST mode'))
return False
if not ip_lib.iproute_arg_supported(
['ip', 'link', 'add', 'type', 'vxlan'],
'proxy'):
LOG.warning(_LW('Option "%(option)s" must be supported by command '
'"%(command)s" to enable %(mode)s mode'),
{'option': 'proxy',
'command': 'ip link add type vxlan',
'mode': 'VXLAN MCAST'})
return False
return True
def check_vxlan_support(self):
self.vxlan_mode = lconst.VXLAN_NONE
if self.vxlan_ucast_supported():
self.vxlan_mode = lconst.VXLAN_UCAST
elif self.vxlan_mcast_supported():
self.vxlan_mode = lconst.VXLAN_MCAST
else:
raise exceptions.VxlanNetworkUnsupported()
LOG.debug('Using %s VXLAN mode', self.vxlan_mode)
def fdb_ip_entry_exists(self, mac, ip, interface):
entries = utils.execute(['ip', 'neigh', 'show', 'to', ip,
'dev', interface],
run_as_root=True)
return mac in entries
def fdb_bridge_entry_exists(self, mac, interface, agent_ip=None):
entries = utils.execute(['bridge', 'fdb', 'show', 'dev', interface],
run_as_root=True)
if not agent_ip:
return mac in entries
return (agent_ip in entries and mac in entries)
def add_fdb_ip_entry(self, mac, ip, interface):
ip_lib.IPDevice(interface).neigh.add(ip, mac)
def remove_fdb_ip_entry(self, mac, ip, interface):
ip_lib.IPDevice(interface).neigh.delete(ip, mac)
def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"):
utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface,
'dst', agent_ip],
run_as_root=True,
check_exit_code=False)
def remove_fdb_bridge_entry(self, mac, agent_ip, interface):
utils.execute(['bridge', 'fdb', 'del', mac, 'dev', interface,
'dst', agent_ip],
run_as_root=True,
check_exit_code=False)
def add_fdb_entries(self, agent_ip, ports, interface):
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.add_fdb_ip_entry(mac, ip, interface)
self.add_fdb_bridge_entry(mac, agent_ip, interface,
operation="replace")
elif self.vxlan_mode == lconst.VXLAN_UCAST:
if self.fdb_bridge_entry_exists(mac, interface):
self.add_fdb_bridge_entry(mac, agent_ip, interface,
"append")
else:
self.add_fdb_bridge_entry(mac, agent_ip, interface)
def remove_fdb_entries(self, agent_ip, ports, interface):
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.remove_fdb_ip_entry(mac, ip, interface)
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
elif self.vxlan_mode == lconst.VXLAN_UCAST:
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
def get_agent_id(self):
if self.bridge_mappings:
mac = utils.get_interface_mac(
list(self.bridge_mappings.values())[0])
else:
devices = ip_lib.IPWrapper().get_devices(True)
if devices:
mac = utils.get_interface_mac(devices[0].name)
else:
LOG.error(_LE("Unable to obtain MAC address for unique ID. "
"Agent terminated!"))
sys.exit(1)
return 'lb%s' % mac.replace(":", "")
def get_agent_configurations(self):
configurations = {'bridge_mappings': self.bridge_mappings,
'interface_mappings': self.interface_mappings
}
if self.vxlan_mode != lconst.VXLAN_NONE:
configurations['tunneling_ip'] = self.local_ip
configurations['tunnel_types'] = [p_const.TYPE_VXLAN]
configurations['l2_population'] = cfg.CONF.VXLAN.l2_population
return configurations
def get_rpc_callbacks(self, context, agent, sg_agent):
return LinuxBridgeRpcCallbacks(context, agent, sg_agent)
def get_rpc_consumers(self):
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.NETWORK, topics.UPDATE],
[topics.SECURITY_GROUP, topics.UPDATE]]
if cfg.CONF.VXLAN.l2_population:
consumers.append([topics.L2POPULATION, topics.UPDATE])
return consumers
def ensure_port_admin_state(self, tap_name, admin_state_up):
LOG.debug("Setting admin_state_up to %s for device %s",
admin_state_up, tap_name)
if admin_state_up:
ip_lib.IPDevice(tap_name).link.set_up()
else:
ip_lib.IPDevice(tap_name).link.set_down()
def setup_arp_spoofing_protection(self, device, device_details):
arp_protect.setup_arp_spoofing_protection(device, device_details)
def delete_arp_spoofing_protection(self, devices):
arp_protect.delete_arp_spoofing_protection(devices)
def delete_unreferenced_arp_protection(self, current_devices):
arp_protect.delete_unreferenced_arp_protection(current_devices)
def get_extension_driver_type(self):
return lconst.EXTENSION_DRIVER_TYPE
class LinuxBridgeRpcCallbacks(
sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2pop_rpc.L2populationRpcCallBackMixin,
amb.CommonAgentManagerRpcCallBackBase):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def network_delete(self, context, **kwargs):
LOG.debug("network_delete received")
network_id = kwargs.get('network_id')
# NOTE(nick-ma-z): Don't remove pre-existing user-defined bridges
if network_id in self.network_map:
phynet = self.network_map[network_id].physical_network
if phynet and phynet in self.agent.mgr.bridge_mappings:
LOG.info(_LI("Physical network %s is defined in "
"bridge_mappings and cannot be deleted."),
network_id)
return
else:
LOG.debug("Network %s is not on this agent.", network_id)
return
bridge_name = self.agent.mgr.get_bridge_name(network_id)
LOG.debug("Delete %s", bridge_name)
self.agent.mgr.delete_bridge(bridge_name)
def port_update(self, context, **kwargs):
port_id = kwargs['port']['id']
device_name = self.agent.mgr.get_tap_device_name(port_id)
# Put the device name in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
self.updated_devices.add(device_name)
LOG.debug("port_update RPC received for port: %s", port_id)
def network_update(self, context, **kwargs):
network_id = kwargs['network']['id']
LOG.debug("network_update message processed for network "
"%(network_id)s, with ports: %(ports)s",
{'network_id': network_id,
'ports': self.agent.network_ports[network_id]})
for port_data in self.agent.network_ports[network_id]:
self.updated_devices.add(port_data['device'])
def fdb_add(self, context, fdb_entries):
LOG.debug("fdb_add received")
for network_id, values in fdb_entries.items():
segment = self.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.mgr.get_vxlan_device_name(
segment.segmentation_id)
agent_ports = values.get('ports')
for agent_ip, ports in agent_ports.items():
if agent_ip == self.agent.mgr.local_ip:
continue
self.agent.mgr.add_fdb_entries(agent_ip,
ports,
interface)
def fdb_remove(self, context, fdb_entries):
LOG.debug("fdb_remove received")
for network_id, values in fdb_entries.items():
segment = self.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.mgr.get_vxlan_device_name(
segment.segmentation_id)
agent_ports = values.get('ports')
for agent_ip, ports in agent_ports.items():
if agent_ip == self.agent.mgr.local_ip:
continue
self.agent.mgr.remove_fdb_entries(agent_ip,
ports,
interface)
def _fdb_chg_ip(self, context, fdb_entries):
LOG.debug("update chg_ip received")
for network_id, agent_ports in fdb_entries.items():
segment = self.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.mgr.get_vxlan_device_name(
segment.segmentation_id)
for agent_ip, state in agent_ports.items():
if agent_ip == self.agent.mgr.local_ip:
continue
after = state.get('after', [])
for mac, ip in after:
self.agent.mgr.add_fdb_ip_entry(mac, ip, interface)
before = state.get('before', [])
for mac, ip in before:
self.agent.mgr.remove_fdb_ip_entry(mac, ip, interface)
def fdb_update(self, context, fdb_entries):
LOG.debug("fdb_update received")
for action, values in fdb_entries.items():
method = '_fdb_' + action
if not hasattr(self, method):
raise NotImplementedError()
getattr(self, method)(context, values)
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
interface_mappings = n_utils.parse_mappings(
cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
except ValueError as e:
LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
"Agent terminated!"), e)
sys.exit(1)
LOG.info(_LI("Interface mappings: %s"), interface_mappings)
try:
bridge_mappings = n_utils.parse_mappings(
cfg.CONF.LINUX_BRIDGE.bridge_mappings)
except ValueError as e:
LOG.error(_LE("Parsing bridge_mappings failed: %s. "
"Agent terminated!"), e)
sys.exit(1)
LOG.info(_LI("Bridge mappings: %s"), bridge_mappings)
manager = LinuxBridgeManager(bridge_mappings, interface_mappings)
polling_interval = cfg.CONF.AGENT.polling_interval
quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
agent = ca.CommonAgentLoop(manager, polling_interval, quitting_rpc_timeout,
constants.AGENT_TYPE_LINUXBRIDGE,
LB_AGENT_BINARY)
setup_profiler.setup("neutron-linuxbridge-agent", cfg.CONF.host)
LOG.info(_LI("Agent initialized successfully, now running... "))
launcher = service.launch(cfg.CONF, agent)
launcher.wait()
Fix typo in method description
Change-Id: Ic406c9fb12fc6e5af05663f11d6a6d6fcd11c832
#!/usr/bin/env python
# Copyright 2012 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Performs per host Linux Bridge configuration for Neutron.
# Based on the structure of the OpenVSwitch agent in the
# Neutron OpenVSwitch Plugin.
import sys
import netaddr
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from oslo_utils import excutils
from six import moves
from neutron._i18n import _LE, _LI, _LW
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import exceptions
from neutron.common import profiler as setup_profiler
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as p_utils
from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb
from neutron.plugins.ml2.drivers.agent import _common_agent as ca
from neutron.plugins.ml2.drivers.agent import config as cagt_config # noqa
from neutron.plugins.ml2.drivers.l2pop.rpc_manager \
import l2population_rpc as l2pop_rpc
from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config # noqa
from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
import constants as lconst
LOG = logging.getLogger(__name__)
LB_AGENT_BINARY = 'neutron-linuxbridge-agent'
BRIDGE_NAME_PREFIX = "brq"
MAX_VLAN_POSTFIX_LEN = 5
VXLAN_INTERFACE_PREFIX = "vxlan-"
class LinuxBridgeManager(amb.CommonAgentManagerBase):
def __init__(self, bridge_mappings, interface_mappings):
super(LinuxBridgeManager, self).__init__()
self.bridge_mappings = bridge_mappings
self.interface_mappings = interface_mappings
self.validate_interface_mappings()
self.validate_bridge_mappings()
self.ip = ip_lib.IPWrapper()
# VXLAN related parameters:
self.local_ip = cfg.CONF.VXLAN.local_ip
self.vxlan_mode = lconst.VXLAN_NONE
if cfg.CONF.VXLAN.enable_vxlan:
device = self.get_local_ip_device()
self.validate_vxlan_group_with_local_ip()
self.local_int = device.name
self.check_vxlan_support()
def validate_interface_mappings(self):
for physnet, interface in self.interface_mappings.items():
if not ip_lib.device_exists(interface):
LOG.error(_LE("Interface %(intf)s for physical network %(net)s"
" does not exist. Agent terminated!"),
{'intf': interface, 'net': physnet})
sys.exit(1)
def validate_bridge_mappings(self):
for physnet, bridge in self.bridge_mappings.items():
if not ip_lib.device_exists(bridge):
LOG.error(_LE("Bridge %(brq)s for physical network %(net)s"
" does not exist. Agent terminated!"),
{'brq': bridge, 'net': physnet})
sys.exit(1)
def validate_vxlan_group_with_local_ip(self):
if not cfg.CONF.VXLAN.vxlan_group:
return
try:
ip_addr = netaddr.IPAddress(self.local_ip)
# Ensure the configured group address/range is valid and multicast
group_net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group)
if not group_net.is_multicast():
raise ValueError()
if not ip_addr.version == group_net.version:
raise ValueError()
except (netaddr.core.AddrFormatError, ValueError):
LOG.error(_LE("Invalid VXLAN Group: %(group)s, must be an address "
"or network (in CIDR notation) in a multicast "
"range of the same address family as local_ip: "
"%(ip)s"),
{'group': cfg.CONF.VXLAN.vxlan_group,
'ip': self.local_ip})
sys.exit(1)
def get_local_ip_device(self):
"""Return the device with local_ip on the host."""
device = self.ip.get_device_by_ip(self.local_ip)
if not device:
LOG.error(_LE("Tunneling cannot be enabled without the local_ip "
"bound to an interface on the host. Please "
"configure local_ip %s on the host interface to "
"be used for tunneling and restart the agent."),
self.local_ip)
sys.exit(1)
return device
def get_existing_bridge_name(self, physical_network):
if not physical_network:
return None
return self.bridge_mappings.get(physical_network)
@staticmethod
def get_bridge_name(network_id):
if not network_id:
LOG.warning(_LW("Invalid Network ID, will lead to incorrect "
"bridge name"))
bridge_name = BRIDGE_NAME_PREFIX + \
network_id[:lconst.RESOURCE_ID_LENGTH]
return bridge_name
def get_subinterface_name(self, physical_interface, vlan_id):
if not vlan_id:
LOG.warning(_LW("Invalid VLAN ID, will lead to incorrect "
"subinterface name"))
vlan_postfix = '.%s' % vlan_id
# For the vlan subinterface name prefix we use:
# * the physical_interface, if len(physical_interface) +
# len(vlan_postifx) <= 15 for backward compatibility reasons
# Example: physical_interface = eth0
# prefix = eth0.1
# prefix = eth0.1111
#
# * otherwise a unique hash per physical_interface to help debugging
# Example: physical_interface = long_interface
# prefix = longHASHED.1
# prefix = longHASHED.1111
#
# Remark: For some physical_interface values, the used prefix can be
# both, the physical_interface itself or a hash, depending
# on the vlan_postfix length.
# Example: physical_interface = mix_interface
# prefix = mix_interface.1 (backward compatible)
# prefix = mix_iHASHED.1111
if (len(physical_interface) + len(vlan_postfix) >
constants.DEVICE_NAME_MAX_LEN):
physical_interface = p_utils.get_interface_name(
physical_interface, max_len=(constants.DEVICE_NAME_MAX_LEN -
MAX_VLAN_POSTFIX_LEN))
return "%s%s" % (physical_interface, vlan_postfix)
@staticmethod
def get_tap_device_name(interface_id):
if not interface_id:
LOG.warning(_LW("Invalid Interface ID, will lead to incorrect "
"tap device name"))
tap_device_name = constants.TAP_DEVICE_PREFIX + \
interface_id[:lconst.RESOURCE_ID_LENGTH]
return tap_device_name
def get_vxlan_device_name(self, segmentation_id):
if 0 <= int(segmentation_id) <= p_const.MAX_VXLAN_VNI:
return VXLAN_INTERFACE_PREFIX + str(segmentation_id)
else:
LOG.warning(_LW("Invalid Segmentation ID: %s, will lead to "
"incorrect vxlan device name"), segmentation_id)
def get_vxlan_group(self, segmentation_id):
net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group)
# Map the segmentation ID to (one of) the group address(es)
return str(net.network +
(int(segmentation_id) & int(net.hostmask)))
def get_deletable_bridges(self):
bridge_list = bridge_lib.get_bridge_names()
bridges = {b for b in bridge_list if b.startswith(BRIDGE_NAME_PREFIX)}
bridges.difference_update(self.bridge_mappings.values())
return bridges
def get_tap_devices_count(self, bridge_name):
if_list = bridge_lib.BridgeDevice(bridge_name).get_interfaces()
return len([interface for interface in if_list if
interface.startswith(constants.TAP_DEVICE_PREFIX)])
def ensure_vlan_bridge(self, network_id, phy_bridge_name,
physical_interface, vlan_id):
"""Create a vlan and bridge unless they already exist."""
interface = self.ensure_vlan(physical_interface, vlan_id)
if phy_bridge_name:
return self.ensure_bridge(phy_bridge_name)
else:
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(interface)
if self.ensure_bridge(bridge_name, interface, ips, gateway):
return interface
def ensure_vxlan_bridge(self, network_id, segmentation_id):
"""Create a vxlan and bridge unless they already exist."""
interface = self.ensure_vxlan(segmentation_id)
if not interface:
LOG.error(_LE("Failed creating vxlan interface for "
"%(segmentation_id)s"),
{segmentation_id: segmentation_id})
return
bridge_name = self.get_bridge_name(network_id)
self.ensure_bridge(bridge_name, interface)
return interface
def get_interface_details(self, interface):
device = self.ip.device(interface)
ips = device.addr.list(scope='global')
# Update default gateway if necessary
gateway = device.route.get_gateway(scope='global')
return ips, gateway
def ensure_flat_bridge(self, network_id, phy_bridge_name,
physical_interface):
"""Create a non-vlan bridge unless it already exists."""
if phy_bridge_name:
return self.ensure_bridge(phy_bridge_name)
else:
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(physical_interface)
if self.ensure_bridge(bridge_name, physical_interface, ips,
gateway):
return physical_interface
def ensure_local_bridge(self, network_id, phy_bridge_name):
"""Create a local bridge unless it already exists."""
if phy_bridge_name:
bridge_name = phy_bridge_name
else:
bridge_name = self.get_bridge_name(network_id)
return self.ensure_bridge(bridge_name)
def ensure_vlan(self, physical_interface, vlan_id):
"""Create a vlan unless it already exists."""
interface = self.get_subinterface_name(physical_interface, vlan_id)
if not ip_lib.device_exists(interface):
LOG.debug("Creating subinterface %(interface)s for "
"VLAN %(vlan_id)s on interface "
"%(physical_interface)s",
{'interface': interface, 'vlan_id': vlan_id,
'physical_interface': physical_interface})
try:
int_vlan = self.ip.add_vlan(interface, physical_interface,
vlan_id)
except RuntimeError:
with excutils.save_and_reraise_exception() as ctxt:
if ip_lib.vlan_in_use(vlan_id):
ctxt.reraise = False
LOG.error(_LE("Unable to create VLAN interface for "
"VLAN ID %s because it is in use by "
"another interface."), vlan_id)
return
int_vlan.disable_ipv6()
int_vlan.link.set_up()
LOG.debug("Done creating subinterface %s", interface)
return interface
def ensure_vxlan(self, segmentation_id):
"""Create a vxlan unless it already exists."""
interface = self.get_vxlan_device_name(segmentation_id)
if not ip_lib.device_exists(interface):
LOG.debug("Creating vxlan interface %(interface)s for "
"VNI %(segmentation_id)s",
{'interface': interface,
'segmentation_id': segmentation_id})
args = {'dev': self.local_int}
if self.vxlan_mode == lconst.VXLAN_MCAST:
args['group'] = self.get_vxlan_group(segmentation_id)
if cfg.CONF.VXLAN.ttl:
args['ttl'] = cfg.CONF.VXLAN.ttl
if cfg.CONF.VXLAN.tos:
args['tos'] = cfg.CONF.VXLAN.tos
if cfg.CONF.VXLAN.l2_population:
args['proxy'] = cfg.CONF.VXLAN.arp_responder
try:
int_vxlan = self.ip.add_vxlan(interface, segmentation_id,
**args)
except RuntimeError:
with excutils.save_and_reraise_exception() as ctxt:
# perform this check after an attempt rather than before
# to avoid excessive lookups and a possible race condition.
if ip_lib.vxlan_in_use(segmentation_id):
ctxt.reraise = False
LOG.error(_LE("Unable to create VXLAN interface for "
"VNI %s because it is in use by another "
"interface."), segmentation_id)
return None
int_vxlan.disable_ipv6()
int_vxlan.link.set_up()
LOG.debug("Done creating vxlan interface %s", interface)
return interface
def update_interface_ip_details(self, destination, source, ips,
gateway):
if ips or gateway:
dst_device = self.ip.device(destination)
src_device = self.ip.device(source)
# Append IP's to bridge if necessary
if ips:
for ip in ips:
dst_device.addr.add(cidr=ip['cidr'])
if gateway:
# Ensure that the gateway can be updated by changing the metric
metric = 100
if 'metric' in gateway:
metric = gateway['metric'] - 1
dst_device.route.add_gateway(gateway=gateway['gateway'],
metric=metric)
src_device.route.delete_gateway(gateway=gateway['gateway'])
# Remove IP's from interface
if ips:
for ip in ips:
src_device.addr.delete(cidr=ip['cidr'])
def _bridge_exists_and_ensure_up(self, bridge_name):
"""Check if the bridge exists and make sure it is up."""
br = ip_lib.IPDevice(bridge_name)
br.set_log_fail_as_error(False)
try:
# If the device doesn't exist this will throw a RuntimeError
br.link.set_up()
except RuntimeError:
return False
return True
def ensure_bridge(self, bridge_name, interface=None, ips=None,
gateway=None):
"""Create a bridge unless it already exists."""
# _bridge_exists_and_ensure_up instead of device_exists is used here
# because there are cases where the bridge exists but it's not UP,
# for example:
# 1) A greenthread was executing this function and had not yet executed
# "ip link set bridge_name up" before eventlet switched to this
# thread running the same function
# 2) The Nova VIF driver was running concurrently and had just created
# the bridge, but had not yet put it UP
if not self._bridge_exists_and_ensure_up(bridge_name):
LOG.debug("Starting bridge %(bridge_name)s for subinterface "
"%(interface)s",
{'bridge_name': bridge_name, 'interface': interface})
bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name)
if bridge_device.setfd(0):
return
if bridge_device.disable_stp():
return
if bridge_device.disable_ipv6():
return
if bridge_device.link.set_up():
return
LOG.debug("Done starting bridge %(bridge_name)s for "
"subinterface %(interface)s",
{'bridge_name': bridge_name, 'interface': interface})
else:
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if not interface:
return bridge_name
# Update IP info if necessary
self.update_interface_ip_details(bridge_name, interface, ips, gateway)
# Check if the interface is part of the bridge
if not bridge_device.owns_interface(interface):
try:
# Check if the interface is not enslaved in another bridge
bridge = bridge_lib.BridgeDevice.get_interface_bridge(
interface)
if bridge:
bridge.delif(interface)
bridge_device.addif(interface)
except Exception as e:
LOG.error(_LE("Unable to add %(interface)s to %(bridge_name)s"
"! Exception: %(e)s"),
{'interface': interface, 'bridge_name': bridge_name,
'e': e})
return
return bridge_name
def ensure_physical_in_bridge(self, network_id,
network_type,
physical_network,
segmentation_id):
if network_type == p_const.TYPE_VXLAN:
if self.vxlan_mode == lconst.VXLAN_NONE:
LOG.error(_LE("Unable to add vxlan interface for network %s"),
network_id)
return
return self.ensure_vxlan_bridge(network_id, segmentation_id)
# NOTE(nick-ma-z): Obtain mappings of physical bridge and interfaces
physical_bridge = self.get_existing_bridge_name(physical_network)
physical_interface = self.interface_mappings.get(physical_network)
if not physical_bridge and not physical_interface:
LOG.error(_LE("No bridge or interface mappings"
" for physical network %s"),
physical_network)
return
if network_type == p_const.TYPE_FLAT:
return self.ensure_flat_bridge(network_id, physical_bridge,
physical_interface)
elif network_type == p_const.TYPE_VLAN:
return self.ensure_vlan_bridge(network_id, physical_bridge,
physical_interface,
segmentation_id)
else:
LOG.error(_LE("Unknown network_type %(network_type)s for network "
"%(network_id)s."), {network_type: network_type,
network_id: network_id})
def add_tap_interface(self, network_id, network_type, physical_network,
segmentation_id, tap_device_name, device_owner):
"""Add tap interface and handle interface missing exceptions."""
try:
return self._add_tap_interface(network_id, network_type,
physical_network, segmentation_id,
tap_device_name, device_owner)
except Exception:
with excutils.save_and_reraise_exception() as ctx:
if not ip_lib.device_exists(tap_device_name):
# the exception was likely a side effect of the tap device
# being removed during handling so we just return false
# like we would if it didn't exist to begin with.
ctx.reraise = False
return False
def _add_tap_interface(self, network_id, network_type, physical_network,
segmentation_id, tap_device_name, device_owner):
"""Add tap interface.
If a VIF has been plugged into a network, this function will
add the corresponding tap device to the relevant bridge.
"""
if not ip_lib.device_exists(tap_device_name):
LOG.debug("Tap device: %s does not exist on "
"this host, skipped", tap_device_name)
return False
bridge_name = self.get_existing_bridge_name(physical_network)
if not bridge_name:
bridge_name = self.get_bridge_name(network_id)
if network_type == p_const.TYPE_LOCAL:
self.ensure_local_bridge(network_id, bridge_name)
else:
phy_dev_name = self.ensure_physical_in_bridge(network_id,
network_type,
physical_network,
segmentation_id)
if not phy_dev_name:
return False
self.ensure_tap_mtu(tap_device_name, phy_dev_name)
# Avoid messing with plugging devices into a bridge that the agent
# does not own
if device_owner.startswith(constants.DEVICE_OWNER_PREFIXES):
# Check if device needs to be added to bridge
if not bridge_lib.BridgeDevice.get_interface_bridge(
tap_device_name):
data = {'tap_device_name': tap_device_name,
'bridge_name': bridge_name}
LOG.debug("Adding device %(tap_device_name)s to bridge "
"%(bridge_name)s", data)
if bridge_lib.BridgeDevice(bridge_name).addif(tap_device_name):
return False
else:
data = {'tap_device_name': tap_device_name,
'device_owner': device_owner,
'bridge_name': bridge_name}
LOG.debug("Skip adding device %(tap_device_name)s to "
"%(bridge_name)s. It is owned by %(device_owner)s and "
"thus added elsewhere.", data)
return True
def ensure_tap_mtu(self, tap_dev_name, phy_dev_name):
"""Ensure the MTU on the tap is the same as the physical device."""
phy_dev_mtu = ip_lib.IPDevice(phy_dev_name).link.mtu
ip_lib.IPDevice(tap_dev_name).link.set_mtu(phy_dev_mtu)
def plug_interface(self, network_id, network_segment, tap_name,
device_owner):
return self.add_tap_interface(network_id, network_segment.network_type,
network_segment.physical_network,
network_segment.segmentation_id,
tap_name, device_owner)
def delete_bridge(self, bridge_name):
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if bridge_device.exists():
physical_interfaces = set(self.interface_mappings.values())
interfaces_on_bridge = bridge_device.get_interfaces()
for interface in interfaces_on_bridge:
self.remove_interface(bridge_name, interface)
if interface.startswith(VXLAN_INTERFACE_PREFIX):
self.delete_interface(interface)
else:
# Match the vlan/flat interface in the bridge.
# If the bridge has an IP, it mean that this IP was moved
# from the current interface, which also mean that this
# interface was not created by the agent.
ips, gateway = self.get_interface_details(bridge_name)
if ips:
self.update_interface_ip_details(interface,
bridge_name,
ips, gateway)
elif interface not in physical_interfaces:
self.delete_interface(interface)
try:
LOG.debug("Deleting bridge %s", bridge_name)
if bridge_device.link.set_down():
return
if bridge_device.delbr():
return
LOG.debug("Done deleting bridge %s", bridge_name)
except RuntimeError:
with excutils.save_and_reraise_exception() as ctxt:
if not bridge_device.exists():
# the exception was likely a side effect of the bridge
# being removed by nova during handling,
# so we just return
ctxt.reraise = False
LOG.debug("Cannot delete bridge %s; it does not exist",
bridge_name)
return
else:
LOG.debug("Cannot delete bridge %s; it does not exist",
bridge_name)
def remove_interface(self, bridge_name, interface_name):
bridge_device = bridge_lib.BridgeDevice(bridge_name)
if bridge_device.exists():
if not bridge_lib.is_bridged_interface(interface_name):
return True
LOG.debug("Removing device %(interface_name)s from bridge "
"%(bridge_name)s",
{'interface_name': interface_name,
'bridge_name': bridge_name})
if bridge_device.delif(interface_name):
return False
LOG.debug("Done removing device %(interface_name)s from bridge "
"%(bridge_name)s",
{'interface_name': interface_name,
'bridge_name': bridge_name})
return True
else:
LOG.debug("Cannot remove device %(interface_name)s bridge "
"%(bridge_name)s does not exist",
{'interface_name': interface_name,
'bridge_name': bridge_name})
return False
def delete_interface(self, interface):
device = self.ip.device(interface)
if device.exists():
LOG.debug("Deleting interface %s",
interface)
device.link.set_down()
device.link.delete()
LOG.debug("Done deleting interface %s", interface)
def get_devices_modified_timestamps(self, devices):
return {d: bridge_lib.get_interface_bridged_time(d) for d in devices}
def get_all_devices(self):
devices = set()
for device in bridge_lib.get_bridge_names():
if device.startswith(constants.TAP_DEVICE_PREFIX):
devices.add(device)
return devices
def vxlan_ucast_supported(self):
if not cfg.CONF.VXLAN.l2_population:
return False
if not ip_lib.iproute_arg_supported(
['bridge', 'fdb'], 'append'):
LOG.warning(_LW('Option "%(option)s" must be supported by command '
'"%(command)s" to enable %(mode)s mode'),
{'option': 'append',
'command': 'bridge fdb',
'mode': 'VXLAN UCAST'})
return False
test_iface = None
for seg_id in moves.range(1, p_const.MAX_VXLAN_VNI + 1):
if (ip_lib.device_exists(self.get_vxlan_device_name(seg_id))
or ip_lib.vxlan_in_use(seg_id)):
continue
test_iface = self.ensure_vxlan(seg_id)
break
else:
LOG.error(_LE('No valid Segmentation ID to perform UCAST test.'))
return False
try:
utils.execute(
cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0],
'dev', test_iface, 'dst', '1.1.1.1'],
run_as_root=True, log_fail_as_error=False)
return True
except RuntimeError:
return False
finally:
self.delete_interface(test_iface)
def vxlan_mcast_supported(self):
if not cfg.CONF.VXLAN.vxlan_group:
LOG.warning(_LW('VXLAN muticast group(s) must be provided in '
'vxlan_group option to enable VXLAN MCAST mode'))
return False
if not ip_lib.iproute_arg_supported(
['ip', 'link', 'add', 'type', 'vxlan'],
'proxy'):
LOG.warning(_LW('Option "%(option)s" must be supported by command '
'"%(command)s" to enable %(mode)s mode'),
{'option': 'proxy',
'command': 'ip link add type vxlan',
'mode': 'VXLAN MCAST'})
return False
return True
def check_vxlan_support(self):
self.vxlan_mode = lconst.VXLAN_NONE
if self.vxlan_ucast_supported():
self.vxlan_mode = lconst.VXLAN_UCAST
elif self.vxlan_mcast_supported():
self.vxlan_mode = lconst.VXLAN_MCAST
else:
raise exceptions.VxlanNetworkUnsupported()
LOG.debug('Using %s VXLAN mode', self.vxlan_mode)
def fdb_ip_entry_exists(self, mac, ip, interface):
entries = utils.execute(['ip', 'neigh', 'show', 'to', ip,
'dev', interface],
run_as_root=True)
return mac in entries
def fdb_bridge_entry_exists(self, mac, interface, agent_ip=None):
entries = utils.execute(['bridge', 'fdb', 'show', 'dev', interface],
run_as_root=True)
if not agent_ip:
return mac in entries
return (agent_ip in entries and mac in entries)
def add_fdb_ip_entry(self, mac, ip, interface):
ip_lib.IPDevice(interface).neigh.add(ip, mac)
def remove_fdb_ip_entry(self, mac, ip, interface):
ip_lib.IPDevice(interface).neigh.delete(ip, mac)
def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"):
utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface,
'dst', agent_ip],
run_as_root=True,
check_exit_code=False)
def remove_fdb_bridge_entry(self, mac, agent_ip, interface):
utils.execute(['bridge', 'fdb', 'del', mac, 'dev', interface,
'dst', agent_ip],
run_as_root=True,
check_exit_code=False)
def add_fdb_entries(self, agent_ip, ports, interface):
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.add_fdb_ip_entry(mac, ip, interface)
self.add_fdb_bridge_entry(mac, agent_ip, interface,
operation="replace")
elif self.vxlan_mode == lconst.VXLAN_UCAST:
if self.fdb_bridge_entry_exists(mac, interface):
self.add_fdb_bridge_entry(mac, agent_ip, interface,
"append")
else:
self.add_fdb_bridge_entry(mac, agent_ip, interface)
def remove_fdb_entries(self, agent_ip, ports, interface):
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.remove_fdb_ip_entry(mac, ip, interface)
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
elif self.vxlan_mode == lconst.VXLAN_UCAST:
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
def get_agent_id(self):
if self.bridge_mappings:
mac = utils.get_interface_mac(
list(self.bridge_mappings.values())[0])
else:
devices = ip_lib.IPWrapper().get_devices(True)
if devices:
mac = utils.get_interface_mac(devices[0].name)
else:
LOG.error(_LE("Unable to obtain MAC address for unique ID. "
"Agent terminated!"))
sys.exit(1)
return 'lb%s' % mac.replace(":", "")
def get_agent_configurations(self):
configurations = {'bridge_mappings': self.bridge_mappings,
'interface_mappings': self.interface_mappings
}
if self.vxlan_mode != lconst.VXLAN_NONE:
configurations['tunneling_ip'] = self.local_ip
configurations['tunnel_types'] = [p_const.TYPE_VXLAN]
configurations['l2_population'] = cfg.CONF.VXLAN.l2_population
return configurations
def get_rpc_callbacks(self, context, agent, sg_agent):
return LinuxBridgeRpcCallbacks(context, agent, sg_agent)
def get_rpc_consumers(self):
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.NETWORK, topics.UPDATE],
[topics.SECURITY_GROUP, topics.UPDATE]]
if cfg.CONF.VXLAN.l2_population:
consumers.append([topics.L2POPULATION, topics.UPDATE])
return consumers
def ensure_port_admin_state(self, tap_name, admin_state_up):
LOG.debug("Setting admin_state_up to %s for device %s",
admin_state_up, tap_name)
if admin_state_up:
ip_lib.IPDevice(tap_name).link.set_up()
else:
ip_lib.IPDevice(tap_name).link.set_down()
def setup_arp_spoofing_protection(self, device, device_details):
arp_protect.setup_arp_spoofing_protection(device, device_details)
def delete_arp_spoofing_protection(self, devices):
arp_protect.delete_arp_spoofing_protection(devices)
def delete_unreferenced_arp_protection(self, current_devices):
arp_protect.delete_unreferenced_arp_protection(current_devices)
def get_extension_driver_type(self):
return lconst.EXTENSION_DRIVER_TYPE
class LinuxBridgeRpcCallbacks(
sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2pop_rpc.L2populationRpcCallBackMixin,
amb.CommonAgentManagerRpcCallBackBase):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def network_delete(self, context, **kwargs):
LOG.debug("network_delete received")
network_id = kwargs.get('network_id')
# NOTE(nick-ma-z): Don't remove pre-existing user-defined bridges
if network_id in self.network_map:
phynet = self.network_map[network_id].physical_network
if phynet and phynet in self.agent.mgr.bridge_mappings:
LOG.info(_LI("Physical network %s is defined in "
"bridge_mappings and cannot be deleted."),
network_id)
return
else:
LOG.debug("Network %s is not on this agent.", network_id)
return
bridge_name = self.agent.mgr.get_bridge_name(network_id)
LOG.debug("Delete %s", bridge_name)
self.agent.mgr.delete_bridge(bridge_name)
def port_update(self, context, **kwargs):
port_id = kwargs['port']['id']
device_name = self.agent.mgr.get_tap_device_name(port_id)
# Put the device name in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
self.updated_devices.add(device_name)
LOG.debug("port_update RPC received for port: %s", port_id)
def network_update(self, context, **kwargs):
network_id = kwargs['network']['id']
LOG.debug("network_update message processed for network "
"%(network_id)s, with ports: %(ports)s",
{'network_id': network_id,
'ports': self.agent.network_ports[network_id]})
for port_data in self.agent.network_ports[network_id]:
self.updated_devices.add(port_data['device'])
def fdb_add(self, context, fdb_entries):
LOG.debug("fdb_add received")
for network_id, values in fdb_entries.items():
segment = self.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.mgr.get_vxlan_device_name(
segment.segmentation_id)
agent_ports = values.get('ports')
for agent_ip, ports in agent_ports.items():
if agent_ip == self.agent.mgr.local_ip:
continue
self.agent.mgr.add_fdb_entries(agent_ip,
ports,
interface)
def fdb_remove(self, context, fdb_entries):
LOG.debug("fdb_remove received")
for network_id, values in fdb_entries.items():
segment = self.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.mgr.get_vxlan_device_name(
segment.segmentation_id)
agent_ports = values.get('ports')
for agent_ip, ports in agent_ports.items():
if agent_ip == self.agent.mgr.local_ip:
continue
self.agent.mgr.remove_fdb_entries(agent_ip,
ports,
interface)
def _fdb_chg_ip(self, context, fdb_entries):
LOG.debug("update chg_ip received")
for network_id, agent_ports in fdb_entries.items():
segment = self.network_map.get(network_id)
if not segment:
return
if segment.network_type != p_const.TYPE_VXLAN:
return
interface = self.agent.mgr.get_vxlan_device_name(
segment.segmentation_id)
for agent_ip, state in agent_ports.items():
if agent_ip == self.agent.mgr.local_ip:
continue
after = state.get('after', [])
for mac, ip in after:
self.agent.mgr.add_fdb_ip_entry(mac, ip, interface)
before = state.get('before', [])
for mac, ip in before:
self.agent.mgr.remove_fdb_ip_entry(mac, ip, interface)
def fdb_update(self, context, fdb_entries):
LOG.debug("fdb_update received")
for action, values in fdb_entries.items():
method = '_fdb_' + action
if not hasattr(self, method):
raise NotImplementedError()
getattr(self, method)(context, values)
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
interface_mappings = n_utils.parse_mappings(
cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
except ValueError as e:
LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
"Agent terminated!"), e)
sys.exit(1)
LOG.info(_LI("Interface mappings: %s"), interface_mappings)
try:
bridge_mappings = n_utils.parse_mappings(
cfg.CONF.LINUX_BRIDGE.bridge_mappings)
except ValueError as e:
LOG.error(_LE("Parsing bridge_mappings failed: %s. "
"Agent terminated!"), e)
sys.exit(1)
LOG.info(_LI("Bridge mappings: %s"), bridge_mappings)
manager = LinuxBridgeManager(bridge_mappings, interface_mappings)
polling_interval = cfg.CONF.AGENT.polling_interval
quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
agent = ca.CommonAgentLoop(manager, polling_interval, quitting_rpc_timeout,
constants.AGENT_TYPE_LINUXBRIDGE,
LB_AGENT_BINARY)
setup_profiler.setup("neutron-linuxbridge-agent", cfg.CONF.host)
LOG.info(_LI("Agent initialized successfully, now running... "))
launcher = service.launch(cfg.CONF, agent)
launcher.wait()
|
import json
from requests.exceptions import RequestException
from twisted.web.http import BAD_REQUEST, NOT_FOUND
from marathon_acme.clients._base import HTTPClient, get_single_header
class VaultError(RequestException):
"""
Exception type for Vault response errors. The ``errors`` parameter contains
a list of error messages. Roughly copies hvac's ``VaultError`` type:
https://github.com/hvac/hvac/blob/v0.6.4/hvac/exceptions.py#L1-L8
"""
def __init__(self, message=None, errors=None, response=None):
if errors:
message = ', '.join(errors)
self.errors = errors
super(VaultError, self).__init__(message, response=response)
class CasError(VaultError):
"""Exception type to indicate a Check-And-Set mismatch error. """
class VaultClient(HTTPClient):
"""
A very simple Vault client that can read and write to paths.
"""
def __init__(self, url, token, *args, **kwargs):
"""
:param url: the URL for Vault
:param token: the Vault auth token
"""
super(VaultClient, self).__init__(*args, url=url, **kwargs)
self._token = token
def request(self, method, path, *args, **kwargs):
headers = kwargs.pop('headers', {})
headers['X-Vault-Token'] = self._token
return super(VaultClient, self).request(
method, *args, path=path, headers=headers, **kwargs)
def _handle_response(self, response, check_cas=False):
if 400 <= response.code < 600:
return self._handle_error(response, check_cas)
return response.json()
def _handle_error(self, response, check_cas):
# Decode as utf-8. treq's text() method uses ISO-8859-1 which is
# correct for random text over HTTP, but not for JSON. Cross fingers
# that we don't receive anything non-utf-8.
d = response.text(encoding='utf-8')
def to_error(text):
# This logic is inspired by hvac as well:
# https://github.com/hvac/hvac/blob/v0.6.4/hvac/adapters.py#L227-L233
exc_type = VaultError
errors = None
if get_single_header(
response.headers, 'Content-Type') == 'application/json':
errors = json.loads(text).get('errors')
# Special case for 404s without extra errors: return None (hvac
# doesn't do this)
if response.code == NOT_FOUND and errors == []:
return None
# Special case for CAS mismatch errors: raise a CasError
# Unfortunately, Vault doesn't make it easy to differentiate
# between CAS errors and other errors so we have to check a few
# things.
if (check_cas and response.code == BAD_REQUEST and
errors and 'check-and-set' in errors[0]):
exc_type = CasError
# hvac returns more specific errors that are subclasses of its
# VaultError. For simplicity we just return fewer error types.
raise exc_type(text, errors=errors, response=response)
return d.addCallback(to_error)
def read(self, path, **params):
"""
Read data from Vault. Returns the JSON-decoded response.
"""
d = self.request('GET', '/v1/' + path, params=params)
return d.addCallback(self._handle_response)
def write(self, path, **data):
"""
Write data to Vault. Returns the JSON-decoded response.
"""
d = self.request('PUT', '/v1/' + path, json=data)
return d.addCallback(self._handle_response, check_cas=True)
def read_kv2(self, path, version=None, mount_path='secret'):
"""
Read some data from a key/value version 2 secret engine.
"""
params = {}
if version is not None:
params['version'] = version
read_path = '{}/data/{}'.format(mount_path, path)
return self.read(read_path, **params)
def create_or_update_kv2(self, path, data, cas=None, mount_path='secret'):
"""
Create or update some data in a key/value version 2 secret engine.
"""
params = {
'options': {},
'data': data
}
if cas is not None:
params['options']['cas'] = cas
write_path = '{}/data/{}'.format(mount_path, path)
return self.write(write_path, **params)
VaultClient: Add doc note about CasError
import json
from requests.exceptions import RequestException
from twisted.web.http import BAD_REQUEST, NOT_FOUND
from marathon_acme.clients._base import HTTPClient, get_single_header
class VaultError(RequestException):
"""
Exception type for Vault response errors. The ``errors`` parameter contains
a list of error messages. Roughly copies hvac's ``VaultError`` type:
https://github.com/hvac/hvac/blob/v0.6.4/hvac/exceptions.py#L1-L8
"""
def __init__(self, message=None, errors=None, response=None):
if errors:
message = ', '.join(errors)
self.errors = errors
super(VaultError, self).__init__(message, response=response)
class CasError(VaultError):
"""Exception type to indicate a Check-And-Set mismatch error. """
class VaultClient(HTTPClient):
"""
A very simple Vault client that can read and write to paths.
"""
def __init__(self, url, token, *args, **kwargs):
"""
:param url: the URL for Vault
:param token: the Vault auth token
"""
super(VaultClient, self).__init__(*args, url=url, **kwargs)
self._token = token
def request(self, method, path, *args, **kwargs):
headers = kwargs.pop('headers', {})
headers['X-Vault-Token'] = self._token
return super(VaultClient, self).request(
method, *args, path=path, headers=headers, **kwargs)
def _handle_response(self, response, check_cas=False):
if 400 <= response.code < 600:
return self._handle_error(response, check_cas)
return response.json()
def _handle_error(self, response, check_cas):
# Decode as utf-8. treq's text() method uses ISO-8859-1 which is
# correct for random text over HTTP, but not for JSON. Cross fingers
# that we don't receive anything non-utf-8.
d = response.text(encoding='utf-8')
def to_error(text):
# This logic is inspired by hvac as well:
# https://github.com/hvac/hvac/blob/v0.6.4/hvac/adapters.py#L227-L233
exc_type = VaultError
errors = None
if get_single_header(
response.headers, 'Content-Type') == 'application/json':
errors = json.loads(text).get('errors')
# Special case for 404s without extra errors: return None (hvac
# doesn't do this)
if response.code == NOT_FOUND and errors == []:
return None
# Special case for CAS mismatch errors: raise a CasError
# Unfortunately, Vault doesn't make it easy to differentiate
# between CAS errors and other errors so we have to check a few
# things.
if (check_cas and response.code == BAD_REQUEST and
errors and 'check-and-set' in errors[0]):
exc_type = CasError
# hvac returns more specific errors that are subclasses of its
# VaultError. For simplicity we just return fewer error types.
raise exc_type(text, errors=errors, response=response)
return d.addCallback(to_error)
def read(self, path, **params):
"""
Read data from Vault. Returns the JSON-decoded response.
"""
d = self.request('GET', '/v1/' + path, params=params)
return d.addCallback(self._handle_response)
def write(self, path, **data):
"""
Write data to Vault. Returns the JSON-decoded response.
"""
d = self.request('PUT', '/v1/' + path, json=data)
return d.addCallback(self._handle_response, check_cas=True)
def read_kv2(self, path, version=None, mount_path='secret'):
"""
Read some data from a key/value version 2 secret engine.
"""
params = {}
if version is not None:
params['version'] = version
read_path = '{}/data/{}'.format(mount_path, path)
return self.read(read_path, **params)
def create_or_update_kv2(self, path, data, cas=None, mount_path='secret'):
"""
Create or update some data in a key/value version 2 secret engine.
:raises CasError:
Raises an error if the ``cas`` value, when provided, doesn't match
Vault's version for the key.
"""
params = {
'options': {},
'data': data
}
if cas is not None:
params['options']['cas'] = cas
write_path = '{}/data/{}'.format(mount_path, path)
return self.write(write_path, **params)
|
import sqlite3
import sys
import os
import urllib.request
''' Downloading Functions '''
def download_images(database_name, downloaded_image_directory, number_to_scrape):
# Connect to database
con = sqlite3.connect(database_name)
with con:
# Get image urls from photos table
cur = con.cursor()
cur.execute("SELECT url FROM photos")
# Init J
j = 0
while j < number_to_scrape:
for url in cur:
for i in url:
print('Image url: ' + i)
os.chdir(downloaded_image_directory)
image_name = (sys.argv[1] + '_' + str(j) + '.jpg')
urllib.request.urlretrieve(i, image_name)
print ('Downloaded: {0}'.format(image_name))
j += 1
print('Finished scraping and downloading')
Fix download to conform with PEP8
import sqlite3
import sys
import os
import urllib.request
''' Downloading Functions '''
def download_images(database_name,
downloaded_image_directory,
number_to_scrape):
# Connect to database
con = sqlite3.connect(database_name)
with con:
# Get image urls from photos table
cur = con.cursor()
cur.execute("SELECT url FROM photos")
# Init J
j = 0
while j < number_to_scrape:
for url in cur:
for i in url:
print('Image url: ' + i)
os.chdir(downloaded_image_directory)
image_name = (sys.argv[1] + '_' + str(j) + '.jpg')
urllib.request.urlretrieve(i, image_name)
print ('Downloaded: {0}'.format(image_name))
j += 1
print('Finished scraping and downloading')
|
"""
Bouncing Ball with Vectors
by Daniel Shiffman.
Demonstration of using vectors to control motion of body.
This example is not object-oriented.
See AccelerationWithVectors for an example of how to simulate motion using
vectors in an object.
"""
location = PVector(100, 100) # Location of shape
velocity = PVector(1.5, 2.1) # Velocity of shape
gravity = PVector(0, 0.2) # Gravity acts at the shape's acceleration.
def setup():
size(640, 360)
smooth()
def draw():
background(0)
# Add velocity to the location.
location.add(velocity)
# Add gravity to velocity.
velocity.add(gravity)
# Bounce off edges.
if location.x < 0 or location.x > width:
velocity.x = velocity.x * -1
if location.y > height:
# We're reducing velocity ever so slightly
# when it hits the bottom of the window.
velocity.y = velocity.y * -0.95
location.y = height
# Display circle at location vector.
stroke(255)
strokeWeight(2)
fill(127)
ellipse(location.x, location.y, 48, 48)
Copyedit
"""
Bouncing Ball with Vectors
by Daniel Shiffman.
Demonstration of using vectors to control motion of body.
This example is not object-oriented.
See AccelerationWithVectors for an example of how to simulate motion using
vectors in an object.
"""
location = PVector(100, 100) # Location of shape
velocity = PVector(1.5, 2.1) # Velocity of shape
gravity = PVector(0, 0.2) # Gravity acts at the shape's acceleration.
def setup():
size(640, 360)
smooth()
def draw():
background(0)
# Add velocity to the location.
location.add(velocity)
# Add gravity to velocity.
velocity.add(gravity)
# Bounce off edges.
if location.x < 0 or location.x > width:
velocity.x = velocity.x * -1
if location.y > height:
# We're reducing velocity ever so slightly
# when it hits the bottom of the window.
velocity.y = velocity.y * -0.95
location.y = height
# Display circle at location vector.
stroke(255)
strokeWeight(2)
fill(127)
ellipse(location.x, location.y, 48, 48)
|
import logging
from collections import defaultdict
from datetime import datetime
from django.db.models import prefetch_related_objects, Q
from reference_data.models import HumanPhenotypeOntology
from matchmaker.models import MatchmakerSubmission, MatchmakerIncomingQuery, MatchmakerResult
from seqr.utils.gene_utils import get_genes, get_gene_ids_for_gene_symbols, get_filtered_gene_ids
from seqr.views.utils.json_to_orm_utils import create_model_from_json
from settings import MME_DEFAULT_CONTACT_INSTITUTION
logger = logging.getLogger(__name__)
def get_mme_genes_phenotypes_for_results(results, **kwargs):
return _get_mme_genes_phenotypes(
results, _get_patient_features, _get_patient_genomic_features, include_matched_symbol_genes=True, **kwargs)
def get_mme_genes_phenotypes_for_submissions(submissions):
return _get_mme_genes_phenotypes(submissions, _get_submisson_features, _get_submisson_genomic_features)
def _get_patient_features(result):
return result['patient'].get('features')
def _get_patient_genomic_features(result):
return result['patient'].get('genomicFeatures')
def _get_submisson_features(submisson):
return submisson.features
def _get_submisson_genomic_features(submisson):
return submisson.genomic_features
def _get_mme_gene_phenotype_ids(results, get_features, get_genomic_features, additional_genes=None, additional_hpo_ids=None):
hpo_ids = additional_hpo_ids if additional_hpo_ids else set()
genes = additional_genes if additional_genes else set()
for result in results:
hpo_ids.update({feature['id'] for feature in (get_features(result) or []) if feature.get('id')})
genes.update({gene_feature['gene']['id'] for gene_feature in (get_genomic_features(result) or [])
if gene_feature.get('gene', {}).get('id')})
gene_ids = {gene for gene in genes if gene.startswith('ENSG')}
gene_symols = {gene for gene in genes if not gene.startswith('ENSG')}
return hpo_ids, gene_ids, gene_symols
def _get_mme_genes_phenotypes(results, get_features, get_genomic_features, include_matched_symbol_genes=False, **kwargs):
hpo_ids, gene_ids, gene_symbols = _get_mme_gene_phenotype_ids(results, get_features, get_genomic_features, **kwargs)
gene_symbols_to_ids = get_gene_ids_for_gene_symbols(gene_symbols)
if include_matched_symbol_genes:
# Include all gene IDs associated with the given symbol
for new_gene_ids in gene_symbols_to_ids.values():
gene_ids.update(new_gene_ids)
# Include any gene IDs whose legacy id is the given symbol
for gene_symbol in gene_symbols:
legacy_gene_ids = get_filtered_gene_ids(
Q(dbnsfpgene__gene_names__startswith='{};'.format(gene_symbol)) |
Q(dbnsfpgene__gene_names__endswith=';{}'.format(gene_symbol)) |
Q(dbnsfpgene__gene_names__contains=';{};'.format(gene_symbol))
)
gene_symbols_to_ids[gene_symbol] += legacy_gene_ids
gene_ids.update(legacy_gene_ids)
else:
gene_ids.update({new_gene_ids[0] for new_gene_ids in gene_symbols_to_ids.values()})
genes_by_id = get_genes(gene_ids)
hpo_terms_by_id = {hpo.hpo_id: hpo.name for hpo in HumanPhenotypeOntology.objects.filter(hpo_id__in=hpo_ids)}
return hpo_terms_by_id, genes_by_id, gene_symbols_to_ids
def parse_mme_features(features, hpo_terms_by_id):
phenotypes = [feature for feature in (features or [])]
for feature in phenotypes:
feature['label'] = hpo_terms_by_id.get(feature['id'])
return phenotypes
def parse_mme_gene_variants(genomic_features, gene_symbols_to_ids):
gene_variants = []
for gene_feature in (genomic_features or []):
gene_ids = get_gene_ids_for_feature(gene_feature, gene_symbols_to_ids)
gene_id = gene_ids[0] if gene_ids else None
if gene_id:
gene_variant = {'geneId': gene_id}
if gene_feature.get('variant'):
gene_variant.update({
'alt': gene_feature['variant'].get('alternateBases'),
'ref': gene_feature['variant'].get('referenceBases'),
'chrom': gene_feature['variant'].get('referenceName'),
'pos': gene_feature['variant'].get('start'),
'end': gene_feature['variant'].get('end'),
'genomeVersion': gene_feature['variant'].get('assembly'),
})
gene_variants.append(gene_variant)
return gene_variants
def get_gene_ids_for_feature(gene_feature, gene_symbols_to_ids):
gene_id = gene_feature.get('gene', {}).get('id')
if not gene_id:
return []
if not gene_id.startswith('ENSG'):
gene_ids = gene_symbols_to_ids.get(gene_feature['gene']['id'], [])
else:
gene_ids = [gene_id]
return gene_ids
def parse_mme_patient(result, hpo_terms_by_id, gene_symbols_to_ids, submission_guid):
phenotypes = parse_mme_features(_get_patient_features(result), hpo_terms_by_id)
gene_variants = parse_mme_gene_variants(_get_patient_genomic_features(result), gene_symbols_to_ids)
parsed_result = {
'geneVariants': gene_variants,
'phenotypes': phenotypes,
'submissionGuid': submission_guid,
}
parsed_result.update(result)
return parsed_result
def get_submission_json_for_external_match(submission, score=None):
submission_json = {
'patient': {
'id': submission.submission_id,
'label': submission.label,
'contact': {
'href': submission.contact_href.replace(' ', ''),
'name': submission.contact_name,
'institution': MME_DEFAULT_CONTACT_INSTITUTION,
},
'species': 'NCBITaxon:9606',
'features': submission.features,
'genomicFeatures': submission.genomic_features,
}
}
sex = MatchmakerSubmission.SEX_LOOKUP.get(submission.individual.sex)
if sex:
submission_json['patient']['sex'] = sex
if score:
submission_json['score'] = score
return submission_json
def get_mme_matches(patient_data, origin_request_host=None, user=None, originating_submission=None):
hpo_terms_by_id, genes_by_id, gene_symbols_to_ids = get_mme_genes_phenotypes_for_results([patient_data])
genomic_features = _get_patient_genomic_features(patient_data)
feature_ids = [
feature['id'] for feature in (_get_patient_features(patient_data) or []) if
feature.get('observed', 'yes') == 'yes' and feature['id'] in hpo_terms_by_id
]
if genomic_features:
for feature in genomic_features:
feature['gene_ids'] = get_gene_ids_for_feature(feature, gene_symbols_to_ids)
get_submission_kwargs = {
'query_ids': list(genes_by_id.keys()),
'filter_key': 'genomic_features',
'id_filter_func': lambda gene_id: {'gene': {'id': gene_id}},
}
else:
get_submission_kwargs = {
'query_ids': feature_ids,
'filter_key': 'features',
'id_filter_func': lambda feature_id: {'id': feature_id, 'observed': 'yes'},
}
query_patient_id = patient_data['patient']['id']
scored_matches = _get_matched_submissions(
query_patient_id,
get_match_genotype_score=lambda match: _get_genotype_score(genomic_features, match) if genomic_features else 0,
get_match_phenotype_score=lambda match: _get_phenotype_score(feature_ids, match) if feature_ids else 0,
**get_submission_kwargs
)
incoming_query = create_model_from_json(MatchmakerIncomingQuery, {
'institution': patient_data['patient']['contact'].get('institution') or origin_request_host,
'patient_id': query_patient_id if scored_matches else None,
}, user)
if not scored_matches:
return [], incoming_query
prefetch_related_objects(list(scored_matches.keys()), 'matchmakerresult_set')
for match_submission in scored_matches.keys():
if not match_submission.matchmakerresult_set.filter(result_data__patient__id=query_patient_id):
create_model_from_json( MatchmakerResult, {
'submission': match_submission,
'originating_submission': originating_submission,
'originating_query': incoming_query,
'result_data': patient_data,
'last_modified_by': user,
}, user)
return [get_submission_json_for_external_match(match_submission, score=score)
for match_submission, score in scored_matches.items()], incoming_query
def _get_matched_submissions(patient_id, get_match_genotype_score, get_match_phenotype_score, query_ids, filter_key, id_filter_func):
if not query_ids:
# no valid entities found for provided features
return {}
matches = []
for item_id in query_ids:
matches += MatchmakerSubmission.objects.filter(**{
'{}__contains'.format(filter_key): [id_filter_func(item_id)]
}).exclude(submission_id=patient_id)
scored_matches = {}
for match in matches:
genotype_score = get_match_genotype_score(match)
phenotype_score = get_match_phenotype_score(match)
if genotype_score > 0 or phenotype_score > 0.65:
scored_matches[match] = {
'_genotypeScore': genotype_score,
'_phenotypeScore': phenotype_score,
'patient': 1 if genotype_score == 1 else round(genotype_score * (phenotype_score or 1), 4)
}
return scored_matches
def _get_genotype_score(genomic_features, match):
match_features_by_gene_id = defaultdict(list)
for feature in match.genomic_features:
match_features_by_gene_id[feature['gene']['id']].append(feature)
score = 0
for feature in genomic_features:
feature_gene_matches = []
for gene_id in feature['gene_ids']:
feature_gene_matches += match_features_by_gene_id[gene_id]
if feature_gene_matches:
score += 0.7
if feature.get('zygosity') and any(
match_feature.get('zygosity') == feature['zygosity'] for match_feature in feature_gene_matches
):
score += 0.15
if feature.get('variant') and any(
_is_same_variant(feature['variant'], match_feature['variant'])
for match_feature in feature_gene_matches if match_feature.get('variant')
):
score += 0.15
return float(score) / len(genomic_features)
def _is_same_variant(var1, var2):
for field in {'alternateBases', 'referenceBases', 'referenceName', 'start', 'assembly'}:
if var1.get(field) and var1.get(field) != var2.get(field):
return False
return True
def _get_phenotype_score(hpo_ids, match):
if not match.features:
return 0.5
matched_hpo_ids = [
hpo_id for hpo_id in hpo_ids
if any(feature['id'] == hpo_id and feature.get('observed', 'yes') == 'yes' for feature in match.features)
]
return float(len(matched_hpo_ids)) / len(hpo_ids) or 0.1
def get_mme_metrics():
submissions = MatchmakerSubmission.objects.filter(deleted_date__isnull=True)
hpo_ids, gene_ids, gene_symols = _get_mme_gene_phenotype_ids(
submissions, _get_submisson_features, _get_submisson_genomic_features
)
if gene_symols:
logger.error('Found unexpected gene in MME: {}'.format(', '.join(gene_symols)))
submitters = set()
for submission in submissions:
submitters.update({name.strip() for name in submission.contact_name.split(',')})
incoming_request_count = MatchmakerIncomingQuery.objects.count()
matched_incoming_request_count = MatchmakerIncomingQuery.objects.filter(
patient_id__isnull=False).distinct('patient_id').count()
return {
"numberOfCases": submissions.count(),
"numberOfSubmitters": len(submitters),
"numberOfUniqueGenes": len(gene_ids),
"numberOfUniqueFeatures": len(hpo_ids),
"numberOfRequestsReceived": incoming_request_count,
"numberOfPotentialMatchesSent": matched_incoming_request_count,
"dateGenerated": datetime.now().strftime('%Y-%m-%d'),
}
MME_DISCLAIMER = """The data in Matchmaker Exchange is provided for research use only. Broad Institute provides the data
in Matchmaker Exchange 'as is'. Broad Institute makes no representations or warranties of any kind concerning the data,
express or implied, including without limitation, warranties of merchantability, fitness for a particular purpose,
noninfringement, or the absence of latent or other defects, whether or not discoverable. Broad will not be liable to the
user or any third parties claiming through user, for any loss or damage suffered through the use of Matchmaker Exchange.
In no event shall Broad Institute or its respective directors, officers, employees, affiliated investigators and
affiliates be liable for indirect, special, incidental or consequential damages or injury to property and lost profits,
regardless of whether the foregoing have been advised, shall have other reason to know, or in fact shall know of the
possibility of the foregoing. Prior to using Broad Institute data in a publication, the user will contact the owner of
the matching dataset to assess the integrity of the match. If the match is validated, the user will offer appropriate
recognition of the data owner's contribution, in accordance with academic standards and custom. Proper acknowledgment
shall be made for the contributions of a party to such results being published or otherwise disclosed, which may include
co-authorship. If Broad Institute contributes to the results being published, the authors must acknowledge Broad
Institute using the following wording: 'This study makes use of data shared through the Broad Institute matchbox
repository. Funding for the Broad Institute was provided in part by National Institutes of Health grant UM1 HG008900 to
Daniel MacArthur and Heidi Rehm.' User will not attempt to use the data or Matchmaker Exchange to establish the
individual identities of any of the subjects from whom the data were obtained. This applies to matches made within Broad
Institute or with any other database included in the Matchmaker Exchange.""".replace('\n', ' ')
dont retrurn deleted mme submissions
import logging
from collections import defaultdict
from datetime import datetime
from django.db.models import prefetch_related_objects, Q
from reference_data.models import HumanPhenotypeOntology
from matchmaker.models import MatchmakerSubmission, MatchmakerIncomingQuery, MatchmakerResult
from seqr.utils.gene_utils import get_genes, get_gene_ids_for_gene_symbols, get_filtered_gene_ids
from seqr.views.utils.json_to_orm_utils import create_model_from_json
from settings import MME_DEFAULT_CONTACT_INSTITUTION
logger = logging.getLogger(__name__)
def get_mme_genes_phenotypes_for_results(results, **kwargs):
return _get_mme_genes_phenotypes(
results, _get_patient_features, _get_patient_genomic_features, include_matched_symbol_genes=True, **kwargs)
def get_mme_genes_phenotypes_for_submissions(submissions):
return _get_mme_genes_phenotypes(submissions, _get_submisson_features, _get_submisson_genomic_features)
def _get_patient_features(result):
return result['patient'].get('features')
def _get_patient_genomic_features(result):
return result['patient'].get('genomicFeatures')
def _get_submisson_features(submisson):
return submisson.features
def _get_submisson_genomic_features(submisson):
return submisson.genomic_features
def _get_mme_gene_phenotype_ids(results, get_features, get_genomic_features, additional_genes=None, additional_hpo_ids=None):
hpo_ids = additional_hpo_ids if additional_hpo_ids else set()
genes = additional_genes if additional_genes else set()
for result in results:
hpo_ids.update({feature['id'] for feature in (get_features(result) or []) if feature.get('id')})
genes.update({gene_feature['gene']['id'] for gene_feature in (get_genomic_features(result) or [])
if gene_feature.get('gene', {}).get('id')})
gene_ids = {gene for gene in genes if gene.startswith('ENSG')}
gene_symols = {gene for gene in genes if not gene.startswith('ENSG')}
return hpo_ids, gene_ids, gene_symols
def _get_mme_genes_phenotypes(results, get_features, get_genomic_features, include_matched_symbol_genes=False, **kwargs):
hpo_ids, gene_ids, gene_symbols = _get_mme_gene_phenotype_ids(results, get_features, get_genomic_features, **kwargs)
gene_symbols_to_ids = get_gene_ids_for_gene_symbols(gene_symbols)
if include_matched_symbol_genes:
# Include all gene IDs associated with the given symbol
for new_gene_ids in gene_symbols_to_ids.values():
gene_ids.update(new_gene_ids)
# Include any gene IDs whose legacy id is the given symbol
for gene_symbol in gene_symbols:
legacy_gene_ids = get_filtered_gene_ids(
Q(dbnsfpgene__gene_names__startswith='{};'.format(gene_symbol)) |
Q(dbnsfpgene__gene_names__endswith=';{}'.format(gene_symbol)) |
Q(dbnsfpgene__gene_names__contains=';{};'.format(gene_symbol))
)
gene_symbols_to_ids[gene_symbol] += legacy_gene_ids
gene_ids.update(legacy_gene_ids)
else:
gene_ids.update({new_gene_ids[0] for new_gene_ids in gene_symbols_to_ids.values()})
genes_by_id = get_genes(gene_ids)
hpo_terms_by_id = {hpo.hpo_id: hpo.name for hpo in HumanPhenotypeOntology.objects.filter(hpo_id__in=hpo_ids)}
return hpo_terms_by_id, genes_by_id, gene_symbols_to_ids
def parse_mme_features(features, hpo_terms_by_id):
phenotypes = [feature for feature in (features or [])]
for feature in phenotypes:
feature['label'] = hpo_terms_by_id.get(feature['id'])
return phenotypes
def parse_mme_gene_variants(genomic_features, gene_symbols_to_ids):
gene_variants = []
for gene_feature in (genomic_features or []):
gene_ids = get_gene_ids_for_feature(gene_feature, gene_symbols_to_ids)
gene_id = gene_ids[0] if gene_ids else None
if gene_id:
gene_variant = {'geneId': gene_id}
if gene_feature.get('variant'):
gene_variant.update({
'alt': gene_feature['variant'].get('alternateBases'),
'ref': gene_feature['variant'].get('referenceBases'),
'chrom': gene_feature['variant'].get('referenceName'),
'pos': gene_feature['variant'].get('start'),
'end': gene_feature['variant'].get('end'),
'genomeVersion': gene_feature['variant'].get('assembly'),
})
gene_variants.append(gene_variant)
return gene_variants
def get_gene_ids_for_feature(gene_feature, gene_symbols_to_ids):
gene_id = gene_feature.get('gene', {}).get('id')
if not gene_id:
return []
if not gene_id.startswith('ENSG'):
gene_ids = gene_symbols_to_ids.get(gene_feature['gene']['id'], [])
else:
gene_ids = [gene_id]
return gene_ids
def parse_mme_patient(result, hpo_terms_by_id, gene_symbols_to_ids, submission_guid):
phenotypes = parse_mme_features(_get_patient_features(result), hpo_terms_by_id)
gene_variants = parse_mme_gene_variants(_get_patient_genomic_features(result), gene_symbols_to_ids)
parsed_result = {
'geneVariants': gene_variants,
'phenotypes': phenotypes,
'submissionGuid': submission_guid,
}
parsed_result.update(result)
return parsed_result
def get_submission_json_for_external_match(submission, score=None):
submission_json = {
'patient': {
'id': submission.submission_id,
'label': submission.label,
'contact': {
'href': submission.contact_href.replace(' ', ''),
'name': submission.contact_name,
'institution': MME_DEFAULT_CONTACT_INSTITUTION,
},
'species': 'NCBITaxon:9606',
'features': submission.features,
'genomicFeatures': submission.genomic_features,
}
}
sex = MatchmakerSubmission.SEX_LOOKUP.get(submission.individual.sex)
if sex:
submission_json['patient']['sex'] = sex
if score:
submission_json['score'] = score
return submission_json
def get_mme_matches(patient_data, origin_request_host=None, user=None, originating_submission=None):
hpo_terms_by_id, genes_by_id, gene_symbols_to_ids = get_mme_genes_phenotypes_for_results([patient_data])
genomic_features = _get_patient_genomic_features(patient_data)
feature_ids = [
feature['id'] for feature in (_get_patient_features(patient_data) or []) if
feature.get('observed', 'yes') == 'yes' and feature['id'] in hpo_terms_by_id
]
if genomic_features:
for feature in genomic_features:
feature['gene_ids'] = get_gene_ids_for_feature(feature, gene_symbols_to_ids)
get_submission_kwargs = {
'query_ids': list(genes_by_id.keys()),
'filter_key': 'genomic_features',
'id_filter_func': lambda gene_id: {'gene': {'id': gene_id}},
}
else:
get_submission_kwargs = {
'query_ids': feature_ids,
'filter_key': 'features',
'id_filter_func': lambda feature_id: {'id': feature_id, 'observed': 'yes'},
}
query_patient_id = patient_data['patient']['id']
scored_matches = _get_matched_submissions(
query_patient_id,
get_match_genotype_score=lambda match: _get_genotype_score(genomic_features, match) if genomic_features else 0,
get_match_phenotype_score=lambda match: _get_phenotype_score(feature_ids, match) if feature_ids else 0,
**get_submission_kwargs
)
incoming_query = create_model_from_json(MatchmakerIncomingQuery, {
'institution': patient_data['patient']['contact'].get('institution') or origin_request_host,
'patient_id': query_patient_id if scored_matches else None,
}, user)
if not scored_matches:
return [], incoming_query
prefetch_related_objects(list(scored_matches.keys()), 'matchmakerresult_set')
for match_submission in scored_matches.keys():
if not match_submission.matchmakerresult_set.filter(result_data__patient__id=query_patient_id):
create_model_from_json( MatchmakerResult, {
'submission': match_submission,
'originating_submission': originating_submission,
'originating_query': incoming_query,
'result_data': patient_data,
'last_modified_by': user,
}, user)
return [get_submission_json_for_external_match(match_submission, score=score)
for match_submission, score in scored_matches.items()], incoming_query
def _get_matched_submissions(patient_id, get_match_genotype_score, get_match_phenotype_score, query_ids, filter_key, id_filter_func):
if not query_ids:
# no valid entities found for provided features
return {}
matches = []
for item_id in query_ids:
matches += MatchmakerSubmission.objects.filter(deleted_date__isnull=True, **{
'{}__contains'.format(filter_key): [id_filter_func(item_id)]
}).exclude(submission_id=patient_id)
scored_matches = {}
for match in matches:
genotype_score = get_match_genotype_score(match)
phenotype_score = get_match_phenotype_score(match)
if genotype_score > 0 or phenotype_score > 0.65:
scored_matches[match] = {
'_genotypeScore': genotype_score,
'_phenotypeScore': phenotype_score,
'patient': 1 if genotype_score == 1 else round(genotype_score * (phenotype_score or 1), 4)
}
return scored_matches
def _get_genotype_score(genomic_features, match):
match_features_by_gene_id = defaultdict(list)
for feature in match.genomic_features:
match_features_by_gene_id[feature['gene']['id']].append(feature)
score = 0
for feature in genomic_features:
feature_gene_matches = []
for gene_id in feature['gene_ids']:
feature_gene_matches += match_features_by_gene_id[gene_id]
if feature_gene_matches:
score += 0.7
if feature.get('zygosity') and any(
match_feature.get('zygosity') == feature['zygosity'] for match_feature in feature_gene_matches
):
score += 0.15
if feature.get('variant') and any(
_is_same_variant(feature['variant'], match_feature['variant'])
for match_feature in feature_gene_matches if match_feature.get('variant')
):
score += 0.15
return float(score) / len(genomic_features)
def _is_same_variant(var1, var2):
for field in {'alternateBases', 'referenceBases', 'referenceName', 'start', 'assembly'}:
if var1.get(field) and var1.get(field) != var2.get(field):
return False
return True
def _get_phenotype_score(hpo_ids, match):
if not match.features:
return 0.5
matched_hpo_ids = [
hpo_id for hpo_id in hpo_ids
if any(feature['id'] == hpo_id and feature.get('observed', 'yes') == 'yes' for feature in match.features)
]
return float(len(matched_hpo_ids)) / len(hpo_ids) or 0.1
def get_mme_metrics():
submissions = MatchmakerSubmission.objects.filter(deleted_date__isnull=True)
hpo_ids, gene_ids, gene_symols = _get_mme_gene_phenotype_ids(
submissions, _get_submisson_features, _get_submisson_genomic_features
)
if gene_symols:
logger.error('Found unexpected gene in MME: {}'.format(', '.join(gene_symols)))
submitters = set()
for submission in submissions:
submitters.update({name.strip() for name in submission.contact_name.split(',')})
incoming_request_count = MatchmakerIncomingQuery.objects.count()
matched_incoming_request_count = MatchmakerIncomingQuery.objects.filter(
patient_id__isnull=False).distinct('patient_id').count()
return {
"numberOfCases": submissions.count(),
"numberOfSubmitters": len(submitters),
"numberOfUniqueGenes": len(gene_ids),
"numberOfUniqueFeatures": len(hpo_ids),
"numberOfRequestsReceived": incoming_request_count,
"numberOfPotentialMatchesSent": matched_incoming_request_count,
"dateGenerated": datetime.now().strftime('%Y-%m-%d'),
}
MME_DISCLAIMER = """The data in Matchmaker Exchange is provided for research use only. Broad Institute provides the data
in Matchmaker Exchange 'as is'. Broad Institute makes no representations or warranties of any kind concerning the data,
express or implied, including without limitation, warranties of merchantability, fitness for a particular purpose,
noninfringement, or the absence of latent or other defects, whether or not discoverable. Broad will not be liable to the
user or any third parties claiming through user, for any loss or damage suffered through the use of Matchmaker Exchange.
In no event shall Broad Institute or its respective directors, officers, employees, affiliated investigators and
affiliates be liable for indirect, special, incidental or consequential damages or injury to property and lost profits,
regardless of whether the foregoing have been advised, shall have other reason to know, or in fact shall know of the
possibility of the foregoing. Prior to using Broad Institute data in a publication, the user will contact the owner of
the matching dataset to assess the integrity of the match. If the match is validated, the user will offer appropriate
recognition of the data owner's contribution, in accordance with academic standards and custom. Proper acknowledgment
shall be made for the contributions of a party to such results being published or otherwise disclosed, which may include
co-authorship. If Broad Institute contributes to the results being published, the authors must acknowledge Broad
Institute using the following wording: 'This study makes use of data shared through the Broad Institute matchbox
repository. Funding for the Broad Institute was provided in part by National Institutes of Health grant UM1 HG008900 to
Daniel MacArthur and Heidi Rehm.' User will not attempt to use the data or Matchmaker Exchange to establish the
individual identities of any of the subjects from whom the data were obtained. This applies to matches made within Broad
Institute or with any other database included in the Matchmaker Exchange.""".replace('\n', ' ')
|
# encoding: utf-8
from __future__ import unicode_literals
if __debug__:
import time
from datetime import datetime, timedelta
from requests import codes
from requests.auth import AuthBase
# Protocol-Mandated Imports
from binascii import hexlify, unhexlify
from hashlib import sha256
from ecdsa import SigningKey, VerifyingKey
from ecdsa.keys import BadSignatureError
log = __import__('logging').getLogger(__name__)
try:
unicode = unicode
str = str
except:
unicode = str
str = bytes
class SignedAuth(AuthBase):
CANONICAL_REQUEST_STRUCTURE = "{r.method}\n{r.headers[date]}\n{r.url}\n{r.body}" # Ref: Application 2.i.
CANONICAL_RESPONSE_STRUCTURE = "{identity}\n{r.request.method}\n{date}\n" \
"{r.request.url}\n{r.text}" # Ref: Server 4.ii.
def __init__(self, identity, private, public):
"""Configure HTDSA signed request/response authentication.
To perform the cryptographic operations required for the HTDSA protocol you must pass in either instances of
`ecdsa` signing and verifying keys, or their hex-encoded versions which will be converted automatically.
Additionally, the identity token (opaque identifier) assigned to your client application by the provider will
need to be passed in so we can identify ourselves.
The private key is your application's private key. The public key is the provider's service key you were given
when registering your application.
"""
self.identity = identity
self.private = SigningKey.from_string(unhexlify(private)) if isinstance(private, (str, unicode)) else private
self.public = VerifyingKey.from_string(unhexlify(public)) if isinstance(public, (str, unicode)) else public
def __call__(self, request):
if __debug__:
log.debug("Signing HTTP {method} request.".format(method=request.method),
extra=dict(request=id(request), method=request.method, url=request.url))
start = time.time()
request.headers['Date'] = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') # Ref: Application 2.i.b.
request.headers['X-Service'] = self.identity # Ref: Application 2.ii.a.
if request.body is None: # We need at least an empty string to avoid errors later.
request.body = ''
canon = self.CANONICAL_REQUEST_STRUCTURE.format(method=request.method, r=request).encode('utf-8')
request.headers['X-Signature'] = hexlify(self.private.sign(canon)) # Ref: Application 2.ii.b.
if __debug__:
duration = time.time() - start
log.debug("Signing of HTTP {method} request took {time} seconds.".format(
method = request.method,
time = duration
), extra = dict(request=id(request), method=request.method, url=request.url, duration=duration))
request.register_hook('response', self.validate)
return request
def validate(self, response, *args, **kw):
if response.status_code != codes.ok:
if __debug__:
log.debug("Skipping validation of non-200 response.")
return
if 'X-Signature' not in response.headers:
raise BadSignatureError("No signature present in resopnse to signed request.")
if __debug__:
log.debug("Validating response signature.", extra=dict(
request = id(response.request),
method = response.request.method,
url = response.request.url,
signature = response.headers['X-Signature'],
))
canon = self.CANONICAL_RESPONSE_STRUCTURE.format(identity=self.identity, r=response, date=response.headers['Date'])
date = datetime.strptime(response.headers['Date'], '%a, %d %b %Y %H:%M:%S GMT')
if datetime.utcnow() - date > timedelta(seconds=30): # Ref: Application 2.i.b.
log.warning("Rejecting stale response.", extra=dict(
request=id(response.request), method=response.request.method, url=response.request.url))
raise BadSignatureError("Rejecting stale response.")
# We allow responses 1s from the future to account for slight clock skew.
if datetime.utcnow() - date < timedelta(seconds=-1):
log.warning("Received a request from the future; please check system time NTP synchronization.")
raise BadSignatureError("Rejecting message from the future.") # Einstein says, "No."
# Raises an exception on failure.
try:
self.public.verify(
unhexlify(response.headers['X-Signature'].encode('utf-8')),
canon.encode('utf-8'),
hashfunc=sha256
)
except BadSignatureError:
# Try verifying again with the time adjusted by one second.
date = (date - timedelta(seconds=1)).strftime('%a, %d %b %Y %H:%M:%S GMT')
canon = self.CANONICAL_RESPONSE_STRUCTURE.format(identity=self.identity, r=response, date=date)
self.public.verify(
unhexlify(response.headers['X-Signature'].encode('utf-8')),
canon.encode('utf-8'),
hashfunc=sha256
)
Corrections to signature and key verification from djdduty/patch-1
# encoding: utf-8
from __future__ import unicode_literals
if __debug__:
import time
from datetime import datetime, timedelta
from requests import codes
from requests.auth import AuthBase
# Protocol-Mandated Imports
from binascii import hexlify, unhexlify
from hashlib import sha256
from ecdsa import SigningKey, VerifyingKey, NIST256p
from ecdsa.keys import BadSignatureError
log = __import__('logging').getLogger(__name__)
try:
unicode = unicode
str = str
except:
unicode = str
str = bytes
class SignedAuth(AuthBase):
CANONICAL_REQUEST_STRUCTURE = "{r.method}\n{r.headers[date]}\n{r.url}\n{r.body}" # Ref: Application 2.i.
CANONICAL_RESPONSE_STRUCTURE = "{identity}\n{r.request.method}\n{date}\n" \
"{r.request.url}\n{r.text}" # Ref: Server 4.ii.
def __init__(self, identity, private, public):
"""Configure HTDSA signed request/response authentication.
To perform the cryptographic operations required for the HTDSA protocol you must pass in either instances of
`ecdsa` signing and verifying keys, or their hex-encoded versions which will be converted automatically.
Additionally, the identity token (opaque identifier) assigned to your client application by the provider will
need to be passed in so we can identify ourselves.
The private key is your application's private key. The public key is the provider's service key you were given
when registering your application.
"""
self.identity = identity
self.private = SigningKey.from_string(unhexlify(private), NIST256p) if isinstance(private, (str, unicode)) else private
self.public = VerifyingKey.from_string(unhexlify(public), NIST256p) if isinstance(public, (str, unicode)) else public
def __call__(self, request):
if __debug__:
log.debug("Signing HTTP {method} request.".format(method=request.method),
extra=dict(request=id(request), method=request.method, url=request.url))
start = time.time()
request.headers['Date'] = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') # Ref: Application 2.i.b.
request.headers['X-Service'] = self.identity # Ref: Application 2.ii.a.
if request.body is None: # We need at least an empty string to avoid errors later.
request.body = ''
canon = self.CANONICAL_REQUEST_STRUCTURE.format(method=request.method, r=request).encode('utf-8')
request.headers['X-Signature'] = hexlify(self.private.sign(canon, hashfunc=sha256)) # Ref: Application 2.ii.b.
if __debug__:
duration = time.time() - start
log.debug("Signing of HTTP {method} request took {time} seconds.".format(
method = request.method,
time = duration
), extra = dict(request=id(request), method=request.method, url=request.url, duration=duration))
request.register_hook('response', self.validate)
return request
def validate(self, response, *args, **kw):
if response.status_code != codes.ok:
if __debug__:
log.debug("Skipping validation of non-200 response.")
return
if 'X-Signature' not in response.headers:
raise BadSignatureError("No signature present in resopnse to signed request.")
if __debug__:
log.debug("Validating response signature.", extra=dict(
request = id(response.request),
method = response.request.method,
url = response.request.url,
signature = response.headers['X-Signature'],
))
canon = self.CANONICAL_RESPONSE_STRUCTURE.format(identity=self.identity, r=response, date=response.headers['Date'])
date = datetime.strptime(response.headers['Date'], '%a, %d %b %Y %H:%M:%S GMT')
if datetime.utcnow() - date > timedelta(seconds=30): # Ref: Application 2.i.b.
log.warning("Rejecting stale response.", extra=dict(
request=id(response.request), method=response.request.method, url=response.request.url))
raise BadSignatureError("Rejecting stale response.")
# We allow responses 1s from the future to account for slight clock skew.
if datetime.utcnow() - date < timedelta(seconds=-1):
log.warning("Received a request from the future; please check system time NTP synchronization.")
raise BadSignatureError("Rejecting message from the future.") # Einstein says, "No."
# Raises an exception on failure.
try:
self.public.verify(
unhexlify(response.headers['X-Signature'].encode('utf-8')),
canon.encode('utf-8'),
hashfunc=sha256
)
except BadSignatureError:
# Try verifying again with the time adjusted by one second.
date = (date - timedelta(seconds=1)).strftime('%a, %d %b %Y %H:%M:%S GMT')
canon = self.CANONICAL_RESPONSE_STRUCTURE.format(identity=self.identity, r=response, date=date)
self.public.verify(
unhexlify(response.headers['X-Signature'].encode('utf-8')),
canon.encode('utf-8'),
hashfunc=sha256
)
|
from ethereum import ethash, ethash_utils, utils
import time
import sys
import sha3
import warnings
from collections import OrderedDict
from ethereum.slogging import get_logger
log = get_logger('eth.pow')
if sys.version_info.major == 2:
from repoze.lru import lru_cache
else:
from functools import lru_cache
try:
import pyethash
ETHASH_LIB = 'pyethash' # the C++ based implementation
except ImportError:
ETHASH_LIB = 'ethash'
warnings.warn('using pure python implementation', ImportWarning)
if ETHASH_LIB == 'ethash':
mkcache = ethash.mkcache
EPOCH_LENGTH = ethash_utils.EPOCH_LENGTH
hashimoto_light = ethash.hashimoto_light
elif ETHASH_LIB == 'pyethash':
mkcache = pyethash.mkcache_bytes
EPOCH_LENGTH = pyethash.EPOCH_LENGTH
hashimoto_light = lambda s, c, h, n: \
pyethash.hashimoto_light(s, c, h, utils.big_endian_to_int(n))
else:
raise Exception("invalid ethash library set")
TT64M1 = 2**64 - 1
cache_seeds = ['\x00' * 32]
cache_by_seed = OrderedDict()
cache_by_seed.max_items = 10
def get_cache(block_number):
while len(cache_seeds) <= block_number // EPOCH_LENGTH:
cache_seeds.append(sha3.sha3_256(cache_seeds[-1]).digest())
seed = cache_seeds[block_number // EPOCH_LENGTH]
if seed in cache_by_seed:
c = cache_by_seed.pop(seed) # pop and append at end
cache_by_seed[seed] = c
return c
c = mkcache(block_number)
cache_by_seed[seed] = c
if len(cache_by_seed) > cache_by_seed.max_items:
cache_by_seed.pop(cache_by_seed.keys()[0]) # remove last recently accessed
return c
@lru_cache(maxsize=32)
def check_pow(block_number, header_hash, mixhash, nonce, difficulty):
"""Check if the proof-of-work of the block is valid.
:param nonce: if given the proof of work function will be evaluated
with this nonce instead of the one already present in
the header
:returns: `True` or `False`
"""
log.debug('checking pow', block_number=block_number)
if len(mixhash) != 32 or len(header_hash) != 32 or len(nonce) != 8:
return False
# Grab current cache
cache = get_cache(block_number)
mining_output = hashimoto_light(block_number, cache, header_hash, nonce)
if mining_output[b'mix digest'] != mixhash:
return False
return utils.big_endian_to_int(mining_output[b'result']) <= 2**256 // (difficulty or 1)
class Miner():
"""
Mines on the current head
Stores received transactions
The process of finalising a block involves four stages:
1) Validate (or, if mining, determine) uncles;
2) validate (or, if mining, determine) transactions;
3) apply rewards;
4) verify (or, if mining, compute a valid) state and nonce.
:param block: the block for which to find a valid nonce
"""
def __init__(self, block):
self.nonce = 0
self.block = block
log.debug('mining', block_number=self.block.number,
block_hash=utils.encode_hex(self.block.hash),
block_difficulty=self.block.difficulty)
def mine(self, rounds=1000, start_nonce=0):
blk = self.block
bin_nonce, mixhash = mine(blk.number, blk.difficulty, blk.mining_hash,
start_nonce=start_nonce, rounds=rounds)
if bin_nonce:
blk.mixhash = mixhash
blk.nonce = bin_nonce
return blk
def mine(block_number, difficulty, mining_hash, start_nonce=0, rounds=1000):
assert utils.isnumeric(start_nonce)
cache = get_cache(block_number)
nonce = start_nonce
target = utils.zpad(utils.int_to_big_endian(2**256 // (difficulty or 1)), 32)
for i in range(1, rounds + 1):
bin_nonce = utils.zpad(utils.int_to_big_endian((nonce + i) & TT64M1), 8)
o = hashimoto_light(block_number, cache, mining_hash, bin_nonce)
if o[b"result"] <= target:
log.debug("nonce found")
assert len(bin_nonce) == 8
assert len(o[b"mix digest"]) == 32
return bin_nonce, o[b"mix digest"]
return None, None
refs #295, use pycryptodome for sha3_256 in ethpow
from ethereum import ethash, ethash_utils, utils
import time
import sys
import warnings
from collections import OrderedDict
from ethereum import utils
from ethereum.slogging import get_logger
log = get_logger('eth.pow')
if sys.version_info.major == 2:
from repoze.lru import lru_cache
else:
from functools import lru_cache
try:
import pyethash
ETHASH_LIB = 'pyethash' # the C++ based implementation
except ImportError:
ETHASH_LIB = 'ethash'
warnings.warn('using pure python implementation', ImportWarning)
if ETHASH_LIB == 'ethash':
mkcache = ethash.mkcache
EPOCH_LENGTH = ethash_utils.EPOCH_LENGTH
hashimoto_light = ethash.hashimoto_light
elif ETHASH_LIB == 'pyethash':
mkcache = pyethash.mkcache_bytes
EPOCH_LENGTH = pyethash.EPOCH_LENGTH
hashimoto_light = lambda s, c, h, n: \
pyethash.hashimoto_light(s, c, h, utils.big_endian_to_int(n))
else:
raise Exception("invalid ethash library set")
TT64M1 = 2**64 - 1
cache_seeds = ['\x00' * 32]
cache_by_seed = OrderedDict()
cache_by_seed.max_items = 10
def get_cache(block_number):
while len(cache_seeds) <= block_number // EPOCH_LENGTH:
cache_seeds.append(utils.sha3(cache_seeds[-1]))
seed = cache_seeds[block_number // EPOCH_LENGTH]
if seed in cache_by_seed:
c = cache_by_seed.pop(seed) # pop and append at end
cache_by_seed[seed] = c
return c
c = mkcache(block_number)
cache_by_seed[seed] = c
if len(cache_by_seed) > cache_by_seed.max_items:
cache_by_seed.pop(cache_by_seed.keys()[0]) # remove last recently accessed
return c
@lru_cache(maxsize=32)
def check_pow(block_number, header_hash, mixhash, nonce, difficulty):
"""Check if the proof-of-work of the block is valid.
:param nonce: if given the proof of work function will be evaluated
with this nonce instead of the one already present in
the header
:returns: `True` or `False`
"""
log.debug('checking pow', block_number=block_number)
if len(mixhash) != 32 or len(header_hash) != 32 or len(nonce) != 8:
return False
# Grab current cache
cache = get_cache(block_number)
mining_output = hashimoto_light(block_number, cache, header_hash, nonce)
if mining_output[b'mix digest'] != mixhash:
return False
return utils.big_endian_to_int(mining_output[b'result']) <= 2**256 // (difficulty or 1)
class Miner():
"""
Mines on the current head
Stores received transactions
The process of finalising a block involves four stages:
1) Validate (or, if mining, determine) uncles;
2) validate (or, if mining, determine) transactions;
3) apply rewards;
4) verify (or, if mining, compute a valid) state and nonce.
:param block: the block for which to find a valid nonce
"""
def __init__(self, block):
self.nonce = 0
self.block = block
log.debug('mining', block_number=self.block.number,
block_hash=utils.encode_hex(self.block.hash),
block_difficulty=self.block.difficulty)
def mine(self, rounds=1000, start_nonce=0):
blk = self.block
bin_nonce, mixhash = mine(blk.number, blk.difficulty, blk.mining_hash,
start_nonce=start_nonce, rounds=rounds)
if bin_nonce:
blk.mixhash = mixhash
blk.nonce = bin_nonce
return blk
def mine(block_number, difficulty, mining_hash, start_nonce=0, rounds=1000):
assert utils.isnumeric(start_nonce)
cache = get_cache(block_number)
nonce = start_nonce
target = utils.zpad(utils.int_to_big_endian(2**256 // (difficulty or 1)), 32)
for i in range(1, rounds + 1):
bin_nonce = utils.zpad(utils.int_to_big_endian((nonce + i) & TT64M1), 8)
o = hashimoto_light(block_number, cache, mining_hash, bin_nonce)
if o[b"result"] <= target:
log.debug("nonce found")
assert len(bin_nonce) == 8
assert len(o[b"mix digest"]) == 32
return bin_nonce, o[b"mix digest"]
return None, None
|
# Sublime modelines - https://github.com/SublimeText/Modelines
# sublime: translate_tabs_to_spaces false; rulers [100,120]
from gosubl import about
from subprocess import Popen, PIPE
import copy
import datetime
import json
import locale
import os
import re
import string
import sublime
import subprocess
import sys
import tempfile
import threading
import traceback as tbck
import uuid
try:
import Queue as queue
except ImportError:
import queue
PY3K = (sys.version_info[0] == 3)
penc = locale.getpreferredencoding()
try_encodings = ['utf-8']
if penc.lower() not in try_encodings:
try_encodings.append(penc)
if PY3K:
str_decode = lambda s, enc, errs: str(s, enc, errors=errs)
else:
str_decode = lambda s, enc, errs: str(s).decode(enc, errs)
try:
STARTUP_INFO = subprocess.STARTUPINFO()
STARTUP_INFO.dwFlags |= subprocess.STARTF_USESHOWWINDOW
STARTUP_INFO.wShowWindow = subprocess.SW_HIDE
except (AttributeError):
STARTUP_INFO = None
NAME = 'GoSublime'
mg9_send_q = queue.Queue()
mg9_recv_q = queue.Queue()
_attr_lck = threading.Lock()
_attr = {}
_checked_lck = threading.Lock()
_checked = {}
environ9 = {}
_env_lck = threading.Lock()
_default_settings = {
"margo_oom": 0,
"_debug": False,
"env": {},
"gscomplete_enabled": False,
"complete_builtins": False,
"autocomplete_builtins": False,
"fmt_enabled": False,
"fmt_tab_indent": True,
"fmt_tab_width": 8,
"fmt_cmd": [],
"gslint_enabled": False,
"comp_lint_enabled": False,
"comp_lint_commands": [],
"gslint_timeout": 0,
"autocomplete_live_hint": False,
"autocomplete_snippets": False,
"autocomplete_tests": False,
"autocomplete_closures": False,
"autocomplete_filter_name": "",
"autocomplete_suggest_imports": False,
"on_save": [],
"shell": [],
"shell_pathsep": "",
"default_snippets": [],
"snippets": [],
"fn_exclude_prefixes": [".", "_"],
"autosave": True,
"build_command": [],
"lint_filter": [],
"lint_enbled": True,
"linters": [],
"9o_instance": "",
"9o_color_scheme": "",
"9o_settings": {},
"9o_aliases": {},
"9o_show_end": False,
"gohtml_extensions": [],
"autoinst": False,
"use_gs_gopath": False,
"use_named_imports": False,
}
_settings = copy.copy(_default_settings)
CLASS_PREFIXES = {
'const': u'\u0196',
'func': u'\u0192',
'type': u'\u0288',
'var': u'\u03BD',
'package': u'package \u03C1',
}
NAME_PREFIXES = {
'interface': u'\u00A1',
}
GOARCHES = [
'386',
'amd64',
'arm',
]
GOOSES = [
'darwin',
'freebsd',
'linux',
'netbsd',
'openbsd',
'plan9',
'windows',
'unix',
]
GOOSARCHES = []
for s in GOOSES:
for arch in GOARCHES:
GOOSARCHES.append('%s_%s' % (s, arch))
GOOSARCHES_PAT = re.compile(r'^(.+?)(?:_(%s))?(?:_(%s))?\.go$' % ('|'.join(GOOSES), '|'.join(GOARCHES)))
IGNORED_SCOPES = frozenset([
'string.quoted.double.go',
'string.quoted.single.go',
'string.quoted.raw.go',
'comment.line.double-slash.go',
'comment.block.go',
# gs-next
'comment.block.go',
'comment.line.double-slash.go',
'string.quoted.double.go',
'string.quoted.raw.go',
'constant.other.rune.go',
])
VFN_ID_PAT = re.compile(r'^(?:gs\.)?view://(\d+)(.*?)$', re.IGNORECASE)
ROWCOL_PAT = re.compile(r'^[:]*(\d+)(?:[:](\d+))?[:]*$')
USER_DIR = os.path.expanduser('~')
USER_DIR_PAT = re.compile(r'^%s/' % (re.escape(USER_DIR.replace('\\', '/').rstrip('/'))))
def simple_fn(fn):
return USER_DIR_PAT.sub('~/', '%s/' % fn.replace('\\', '/').rstrip('/'))
def getwd():
if PY3K:
return os.getcwd()
return os.getcwdu()
def apath(fn, cwd=None):
if not os.path.isabs(fn):
if not cwd:
cwd = getwd()
fn = os.path.join(cwd, fn)
return os.path.normcase(os.path.normpath(fn))
def temp_dir(subdir=''):
tmpdir = os.path.join(tempfile.gettempdir(), NAME, subdir)
err = ''
try:
os.makedirs(tmpdir)
except Exception as ex:
err = str(ex)
return (tmpdir, err)
def temp_file(suffix='', prefix='', delete=True):
try:
f = tempfile.NamedTemporaryFile(suffix=suffix, prefix=prefix, dir=temp_dir(), delete=delete)
except Exception as ex:
return (None, 'Error: %s' % ex)
return (f, '')
def basedir_or_cwd(fn):
if fn and not fn.startswith('gs.view://'):
return os.path.dirname(fn)
return getwd()
def popen(args, stdout=PIPE, stderr=PIPE, shell=False, environ={}, cwd=None, bufsize=0):
ev = env()
for k,v in environ.items():
ev[astr(k)] = astr(v)
try:
setsid = os.setsid
except Exception:
setsid = None
return Popen(args, stdout=stdout, stderr=stderr, stdin=PIPE, startupinfo=STARTUP_INFO,
shell=shell, env=ev, cwd=cwd, preexec_fn=setsid, bufsize=bufsize)
def is_a(v, base):
return isinstance(v, type(base))
def is_a_string(v):
try:
return isinstance(v, basestring)
except NameError:
return isinstance(v, str)
def settings_obj():
return sublime.load_settings("GoSublime.sublime-settings")
def aso():
return sublime.load_settings("GoSublime-aux.sublime-settings")
def save_aso():
return sublime.save_settings("GoSublime-aux.sublime-settings")
def settings_dict():
m = copy.copy(_settings)
for k in m:
v = attr(k, None)
if v is not None:
m[k] = v
nv = dval(copy.copy(_settings.get('env')), {})
lpe = dval(attr('last_active_project_settings', {}).get('env'), {})
nv.update(lpe)
m['env'] = nv
return m
def setting(k, d=None):
return settings_dict().get(k, d)
def println(*a):
l = []
l.append('\n** %s **:' % datetime.datetime.now())
for s in a:
l.append(ustr(s).strip())
l.append('--------------------------------')
l = '%s\n' % '\n'.join(l)
print(l)
return l
def debug(domain, *a):
if setting('_debug') is True:
print('\n** DEBUG ** %s ** %s **:' % (domain, datetime.datetime.now()))
for s in a:
print(ustr(s).strip())
print('--------------------------------')
def log(*a):
try:
LOGFILE.write(println(*a))
LOGFILE.flush()
except Exception:
pass
def notify(domain, txt):
txt = "%s: %s" % (domain, txt)
status_message(txt)
def notice(domain, txt):
error(domain, txt)
def error(domain, txt):
txt = "%s: %s" % (domain, txt)
log(txt)
status_message(txt)
def error_traceback(domain, status_txt=''):
tb = traceback().strip()
if status_txt:
prefix = '%s\n' % status_txt
else:
prefix = ''
i = tb.rfind('\n')
if i > 0:
status_txt = tb[i:].strip()
else:
status_txt = tb
log("%s: %s%s" % (domain, prefix, tb))
status_message("%s: %s" % (domain, status_txt))
def notice_undo(domain, txt, view, should_undo):
def cb():
if should_undo:
view.run_command('undo')
notice(domain, txt)
sublime.set_timeout(cb, 0)
def show_output(domain, s, print_output=True, syntax_file='', replace=True, merge_domain=False, scroll_end=False):
def cb(domain, s, print_output, syntax_file):
panel_name = '%s-output' % domain
if merge_domain:
s = '%s: %s' % (domain, s)
if print_output:
println(s)
elif print_output:
println('%s: %s' % (domain, s))
win = sublime.active_window()
if win:
win.get_output_panel(panel_name).run_command('gs_set_output_panel_content', {
'content': s,
'syntax_file': syntax_file,
'scroll_end': scroll_end,
'replace': replace,
})
win.run_command("show_panel", {"panel": "output.%s" % panel_name})
sublime.set_timeout(lambda: cb(domain, s, print_output, syntax_file), 0)
def is_pkg_view(view=None):
# todo implement this fully
return is_go_source_view(view, False)
def is_go_source_view(view=None, strict=True):
if view is None:
return False
selector_match = view.score_selector(sel(view).begin(), 'source.go') > 0
if selector_match:
return True
if strict:
return False
fn = view.file_name() or ''
return fn.lower().endswith('.go')
def active_valid_go_view(win=None, strict=True):
if not win:
win = sublime.active_window()
if win:
view = win.active_view()
if view and is_go_source_view(view, strict):
return view
return None
def rowcol(view):
return view.rowcol(sel(view).begin())
def os_is_windows():
return os.name == "nt"
def getenv(name, default='', m={}):
return env(m).get(name, default)
def env(m={}):
"""
Assemble environment information needed for correct operation. In particular,
ensure that directories containing binaries are included in PATH.
"""
e = os.environ.copy()
e.update(environ9)
e.update(m)
roots = lst(e.get('GOPATH', '').split(os.pathsep), e.get('GOROOT', ''))
lfn = attr('last_active_go_fn', '')
comps = lfn.split(os.sep)
gs_gopath = []
for i, s in enumerate(comps):
if s.lower() == "src":
p = os.sep.join(comps[:i])
if p not in roots:
gs_gopath.append(p)
gs_gopath.reverse()
e['GS_GOPATH'] = os.pathsep.join(gs_gopath)
uenv = setting('env', {})
for k in uenv:
try:
uenv[k] = string.Template(uenv[k]).safe_substitute(e)
except Exception as ex:
println('%s: Cannot expand env var `%s`: %s' % (NAME, k, ex))
e.update(uenv)
e.update(m)
# For custom values of GOPATH, installed binaries via go install
# will go into the "bin" dir of the corresponding GOPATH path.
# Therefore, make sure these paths are included in PATH.
add_path = [home_dir_path('bin')]
for s in lst(e.get('GOROOT', ''), e.get('GOPATH', '').split(os.pathsep)):
if s:
s = os.path.join(s, 'bin')
if s not in add_path:
add_path.append(s)
gobin = e.get('GOBIN', '')
if gobin and gobin not in add_path:
add_path.append(gobin)
if os_is_windows():
l = [
'~\\bin',
'~\\go\\bin',
'C:\\Go\\bin',
]
else:
l = [
'~/bin',
'~/go/bin',
'/usr/local/go/bin',
'/usr/local/opt/go/bin',
'/usr/local/bin',
'/usr/bin',
]
for s in l:
s = os.path.expanduser(s)
if s not in add_path:
add_path.append(s)
for s in e.get('PATH', '').split(os.pathsep):
if s and s not in add_path:
add_path.append(s)
e['PATH'] = os.pathsep.join(add_path)
# Ensure no unicode objects leak through. The reason is twofold:
# * On Windows, Python 2.6 (used by Sublime Text) subprocess.Popen
# can only take bytestrings as environment variables in the
# "env" parameter. Reference:
# https://github.com/DisposaBoy/GoSublime/issues/112
# http://stackoverflow.com/q/12253014/1670
# * Avoids issues with networking too.
clean_env = {}
for k, v in e.items():
try:
clean_env[astr(k)] = astr(v)
except Exception as ex:
println('%s: Bad env: %s' % (NAME, ex))
return clean_env
def mirror_settings(so):
m = {}
for k in _default_settings:
v = so.get(k, None)
if v is not None:
ok = False
d = _default_settings[k]
if is_a(d, []):
if is_a(v, []):
ok = True
elif is_a(d, {}):
if is_a(v, []):
ok = True
else:
ok = True
m[k] = copy.copy(v)
return m
def sync_settings():
_settings.update(mirror_settings(settings_obj()))
def view_fn(view):
if view is not None:
if view.file_name():
return view.file_name()
return 'gs.view://%s' % view.id()
return ''
def view_src(view):
if view:
return view.substr(sublime.Region(0, view.size()))
return ''
def win_view(vfn=None, win=None):
if not win:
win = sublime.active_window()
view = None
if win:
m = VFN_ID_PAT.match(vfn or '')
if m:
try:
vid = int(m.group(1))
for v in win.views():
if v.id() == vid:
view = v
break
except Exception:
gs.error_traceback(NAME)
elif not vfn or vfn == "<stdin>":
view = win.active_view()
else:
view = win.open_file(vfn)
return (win, view)
def do_focus(fn, row, col, win, focus_pat, cb):
win, view = win_view(fn, win)
if win is None or view is None:
notify(NAME, 'Cannot find file position %s:%s:%s' % (fn, row, col))
if cb:
cb(False)
elif view.is_loading():
focus(fn, row=row, col=col, win=win, focus_pat=focus_pat, cb=cb)
else:
win.focus_view(view)
if row <= 0 and col <= 0 and focus_pat:
r = view.find(focus_pat, 0)
if r:
row, col = view.rowcol(r.begin())
view.run_command("gs_goto_row_col", { "row": row, "col": col })
if cb:
cb(True)
def focus(fn, row=0, col=0, win=None, timeout=100, focus_pat='^package ', cb=None):
sublime.set_timeout(lambda: do_focus(fn, row, col, win, focus_pat, cb), timeout)
def sm_cb():
global sm_text
global sm_set_text
global sm_frame
with sm_lck:
ntasks = len(sm_tasks)
tm = sm_tm
s = sm_text
if s:
delta = (datetime.datetime.now() - tm)
if delta.seconds >= 10:
sm_text = ''
if ntasks > 0:
if s:
s = u'%s, %s' % (sm_frames[sm_frame], s)
else:
s = u'%s' % sm_frames[sm_frame]
if ntasks > 1:
s = '%d %s' % (ntasks, s)
sm_frame = (sm_frame + 1) % len(sm_frames)
if s != sm_set_text:
sm_set_text = s
st2_status_message(s)
sched_sm_cb()
def sched_sm_cb():
sublime.set_timeout(sm_cb, 250)
def status_message(s):
global sm_text
global sm_tm
with sm_lck:
sm_text = s
sm_tm = datetime.datetime.now()
def begin(domain, message, set_status=True, cancel=None):
global sm_task_counter
if message and set_status:
status_message('%s: %s' % (domain, message))
with sm_lck:
sm_task_counter += 1
tid = 't%d' % sm_task_counter
sm_tasks[tid] = {
'start': datetime.datetime.now(),
'domain': domain,
'message': message,
'cancel': cancel,
}
return tid
def end(task_id):
with sm_lck:
try:
del(sm_tasks[task_id])
except:
pass
def task(task_id, default=None):
with sm_lck:
return sm_tasks.get(task_id, default)
def clear_tasks():
with sm_lck:
sm_tasks = {}
def task_list():
with sm_lck:
return sorted(sm_tasks.items())
def cancel_task(tid):
t = task(tid)
if t and t['cancel']:
s = 'are you sure you want to end task: #%s %s: %s' % (tid, t['domain'], t['message'])
if sublime.ok_cancel_dialog(s):
t['cancel']()
return True
return False
def show_quick_panel(items, cb=None):
def f():
win = sublime.active_window()
if win is not None:
if callable(cb):
f = lambda i: cb(i, win)
else:
f = lambda i: None
win.show_quick_panel(items, f)
sublime.set_timeout(f, 0)
def go_env_goroot():
out, _, _ = runcmd(['go env GOROOT'], shell=True)
return out.strip().encode('utf-8')
def list_dir_tree(dirname, filter, exclude_prefix=('.', '_')):
lst = []
try:
for fn in os.listdir(dirname):
if fn[0] in exclude_prefix:
continue
basename = fn.lower()
fn = os.path.join(dirname, fn)
if os.path.isdir(fn):
lst.extend(list_dir_tree(fn, filter, exclude_prefix))
else:
if filter:
pathname = fn.lower()
_, ext = os.path.splitext(basename)
ext = ext.lstrip('.')
if filter(pathname, basename, ext):
lst.append(fn)
else:
lst.append(fn)
except Exception:
pass
return lst
def traceback(domain='GoSublime'):
return '%s: %s' % (domain, tbck.format_exc())
def show_traceback(domain):
show_output(domain, traceback(), replace=False, merge_domain=False)
def maybe_unicode_str(s):
try:
return isinstance(s, unicode)
except NameError:
return isinstance(s, str)
def ustr(s):
if maybe_unicode_str(s):
return s
for e in try_encodings:
try:
return str_decode(s, e, 'strict')
except UnicodeDecodeError:
continue
return str_decode(s, 'utf-8', 'replace')
def astr(s):
if maybe_unicode_str(s):
if PY3K:
return s
return s.encode('utf-8')
return str(s)
def lst(*a):
l = []
for v in a:
if is_a([], v):
l.extend(v)
else:
l.append(v)
return l
def dval(v, d):
if v is not None:
if is_a_string(d) and is_a_string(v):
return v
if is_a(v, d):
return v
return d
def tm_path(name):
d = {
'9o': 'syntax/GoSublime-9o.tmLanguage',
'doc': 'GsDoc.hidden-tmLanguage',
'go': 'syntax/GoSublime-Go.tmLanguage',
'gohtml': 'syntax/GoSublime-HTML.tmLanguage',
}
try:
so = sublime.load_settings('GoSublime-next.sublime-settings')
if 'go' in so.get('extensions', []):
d['go'] = 'GoSublime-next.tmLanguage'
except Exception:
pass
return 'Packages/GoSublime/%s' % d[name]
def packages_dir():
fn = attr('gs.packages_dir')
if not fn:
fn = sublime.packages_path()
set_attr('gs.packages_dir', fn)
return fn
def dist_path(*a):
return os.path.join(packages_dir(), 'GoSublime', *a)
def mkdirp(fn):
try:
os.makedirs(fn)
except:
pass
def _home_path(*a):
return os.path.join(packages_dir(), 'User', 'GoSublime', about.PLATFORM, *a)
def home_dir_path(*a):
fn = _home_path(*a)
mkdirp(fn)
return fn
def home_path(*a):
fn = _home_path(*a)
mkdirp(os.path.dirname(fn))
return fn
def json_decode(s, default):
try:
res = json.loads(s)
if is_a(res, default):
return (res, '')
return (res, 'Unexpected value type')
except Exception as ex:
return (default, 'Decode Error: %s' % ex)
def json_encode(a):
try:
return (json.dumps(a), '')
except Exception as ex:
return ('', 'Encode Error: %s' % ex)
def attr(k, d=None):
with _attr_lck:
v = _attr.get(k, None)
return d if v is None else copy.copy(v)
def set_attr(k, v):
with _attr_lck:
_attr[k] = v
def del_attr(k):
with _attr_lck:
try:
v = _attr[k]
except Exception:
v = None
try:
del _attr[k]
except Exception:
pass
return v
# note: this functionality should not be used inside this module
# continue to use the try: X except: X=Y hack
def checked(domain, k):
with _checked_lck:
k = 'common.checked.%s.%s' % (domain, k)
v = _checked.get(k, False)
_checked[k] = True
return v
def sel(view, i=0):
try:
s = view.sel()
if s is not None and i < len(s):
return s[i]
except Exception:
pass
return sublime.Region(0, 0)
def which_ok(fn):
try:
return os.path.isfile(fn) and os.access(fn, os.X_OK)
except Exception:
return False
def which(cmd):
if os.path.isabs(cmd):
return cmd if which_ok(cmd) else ''
# not supporting PATHEXT. period.
if os_is_windows():
cmd = '%s.exe' % cmd
seen = {}
for p in getenv('PATH', '').split(os.pathsep):
p = os.path.join(p, cmd)
if p not in seen and which_ok(p):
return p
seen[p] = True
return ''
try:
st2_status_message
except:
sm_lck = threading.Lock()
sm_task_counter = 0
sm_tasks = {}
sm_frame = 0
sm_frames = (
u'\u25D2',
u'\u25D1',
u'\u25D3',
u'\u25D0'
)
sm_tm = datetime.datetime.now()
sm_text = ''
sm_set_text = ''
st2_status_message = sublime.status_message
sublime.status_message = status_message
DEVNULL = open(os.devnull, 'w')
LOGFILE = DEVNULL
try:
gs9o
except Exception:
gs9o = {}
def gs_init(m={}):
global LOGFILE
try:
LOGFILE = open(home_path('log.txt'), 'a+')
except Exception as ex:
LOGFILE = DEVNULL
notice(NAME, 'Cannot create log file. Remote(margo) and persistent logging will be disabled. Error: %s' % ex)
sched_sm_cb()
settings_obj().clear_on_change("GoSublime.settings")
settings_obj().add_on_change("GoSublime.settings", sync_settings)
sync_settings()
* use a monospace font for the quick panels to make items line up
# Sublime modelines - https://github.com/SublimeText/Modelines
# sublime: translate_tabs_to_spaces false; rulers [100,120]
from gosubl import about
from subprocess import Popen, PIPE
import copy
import datetime
import json
import locale
import os
import re
import string
import sublime
import subprocess
import sys
import tempfile
import threading
import traceback as tbck
import uuid
try:
import Queue as queue
except ImportError:
import queue
PY3K = (sys.version_info[0] == 3)
penc = locale.getpreferredencoding()
try_encodings = ['utf-8']
if penc.lower() not in try_encodings:
try_encodings.append(penc)
if PY3K:
str_decode = lambda s, enc, errs: str(s, enc, errors=errs)
else:
str_decode = lambda s, enc, errs: str(s).decode(enc, errs)
try:
STARTUP_INFO = subprocess.STARTUPINFO()
STARTUP_INFO.dwFlags |= subprocess.STARTF_USESHOWWINDOW
STARTUP_INFO.wShowWindow = subprocess.SW_HIDE
except (AttributeError):
STARTUP_INFO = None
NAME = 'GoSublime'
mg9_send_q = queue.Queue()
mg9_recv_q = queue.Queue()
_attr_lck = threading.Lock()
_attr = {}
_checked_lck = threading.Lock()
_checked = {}
environ9 = {}
_env_lck = threading.Lock()
_default_settings = {
"margo_oom": 0,
"_debug": False,
"env": {},
"gscomplete_enabled": False,
"complete_builtins": False,
"autocomplete_builtins": False,
"fmt_enabled": False,
"fmt_tab_indent": True,
"fmt_tab_width": 8,
"fmt_cmd": [],
"gslint_enabled": False,
"comp_lint_enabled": False,
"comp_lint_commands": [],
"gslint_timeout": 0,
"autocomplete_live_hint": False,
"autocomplete_snippets": False,
"autocomplete_tests": False,
"autocomplete_closures": False,
"autocomplete_filter_name": "",
"autocomplete_suggest_imports": False,
"on_save": [],
"shell": [],
"shell_pathsep": "",
"default_snippets": [],
"snippets": [],
"fn_exclude_prefixes": [".", "_"],
"autosave": True,
"build_command": [],
"lint_filter": [],
"lint_enbled": True,
"linters": [],
"9o_instance": "",
"9o_color_scheme": "",
"9o_settings": {},
"9o_aliases": {},
"9o_show_end": False,
"gohtml_extensions": [],
"autoinst": False,
"use_gs_gopath": False,
"use_named_imports": False,
}
_settings = copy.copy(_default_settings)
CLASS_PREFIXES = {
'const': u'\u0196',
'func': u'\u0192',
'type': u'\u0288',
'var': u'\u03BD',
'package': u'package \u03C1',
}
NAME_PREFIXES = {
'interface': u'\u00A1',
}
GOARCHES = [
'386',
'amd64',
'arm',
]
GOOSES = [
'darwin',
'freebsd',
'linux',
'netbsd',
'openbsd',
'plan9',
'windows',
'unix',
]
GOOSARCHES = []
for s in GOOSES:
for arch in GOARCHES:
GOOSARCHES.append('%s_%s' % (s, arch))
GOOSARCHES_PAT = re.compile(r'^(.+?)(?:_(%s))?(?:_(%s))?\.go$' % ('|'.join(GOOSES), '|'.join(GOARCHES)))
IGNORED_SCOPES = frozenset([
'string.quoted.double.go',
'string.quoted.single.go',
'string.quoted.raw.go',
'comment.line.double-slash.go',
'comment.block.go',
# gs-next
'comment.block.go',
'comment.line.double-slash.go',
'string.quoted.double.go',
'string.quoted.raw.go',
'constant.other.rune.go',
])
VFN_ID_PAT = re.compile(r'^(?:gs\.)?view://(\d+)(.*?)$', re.IGNORECASE)
ROWCOL_PAT = re.compile(r'^[:]*(\d+)(?:[:](\d+))?[:]*$')
USER_DIR = os.path.expanduser('~')
USER_DIR_PAT = re.compile(r'^%s/' % (re.escape(USER_DIR.replace('\\', '/').rstrip('/'))))
def simple_fn(fn):
return USER_DIR_PAT.sub('~/', '%s/' % fn.replace('\\', '/').rstrip('/'))
def getwd():
if PY3K:
return os.getcwd()
return os.getcwdu()
def apath(fn, cwd=None):
if not os.path.isabs(fn):
if not cwd:
cwd = getwd()
fn = os.path.join(cwd, fn)
return os.path.normcase(os.path.normpath(fn))
def temp_dir(subdir=''):
tmpdir = os.path.join(tempfile.gettempdir(), NAME, subdir)
err = ''
try:
os.makedirs(tmpdir)
except Exception as ex:
err = str(ex)
return (tmpdir, err)
def temp_file(suffix='', prefix='', delete=True):
try:
f = tempfile.NamedTemporaryFile(suffix=suffix, prefix=prefix, dir=temp_dir(), delete=delete)
except Exception as ex:
return (None, 'Error: %s' % ex)
return (f, '')
def basedir_or_cwd(fn):
if fn and not fn.startswith('gs.view://'):
return os.path.dirname(fn)
return getwd()
def popen(args, stdout=PIPE, stderr=PIPE, shell=False, environ={}, cwd=None, bufsize=0):
ev = env()
for k,v in environ.items():
ev[astr(k)] = astr(v)
try:
setsid = os.setsid
except Exception:
setsid = None
return Popen(args, stdout=stdout, stderr=stderr, stdin=PIPE, startupinfo=STARTUP_INFO,
shell=shell, env=ev, cwd=cwd, preexec_fn=setsid, bufsize=bufsize)
def is_a(v, base):
return isinstance(v, type(base))
def is_a_string(v):
try:
return isinstance(v, basestring)
except NameError:
return isinstance(v, str)
def settings_obj():
return sublime.load_settings("GoSublime.sublime-settings")
def aso():
return sublime.load_settings("GoSublime-aux.sublime-settings")
def save_aso():
return sublime.save_settings("GoSublime-aux.sublime-settings")
def settings_dict():
m = copy.copy(_settings)
for k in m:
v = attr(k, None)
if v is not None:
m[k] = v
nv = dval(copy.copy(_settings.get('env')), {})
lpe = dval(attr('last_active_project_settings', {}).get('env'), {})
nv.update(lpe)
m['env'] = nv
return m
def setting(k, d=None):
return settings_dict().get(k, d)
def println(*a):
l = []
l.append('\n** %s **:' % datetime.datetime.now())
for s in a:
l.append(ustr(s).strip())
l.append('--------------------------------')
l = '%s\n' % '\n'.join(l)
print(l)
return l
def debug(domain, *a):
if setting('_debug') is True:
print('\n** DEBUG ** %s ** %s **:' % (domain, datetime.datetime.now()))
for s in a:
print(ustr(s).strip())
print('--------------------------------')
def log(*a):
try:
LOGFILE.write(println(*a))
LOGFILE.flush()
except Exception:
pass
def notify(domain, txt):
txt = "%s: %s" % (domain, txt)
status_message(txt)
def notice(domain, txt):
error(domain, txt)
def error(domain, txt):
txt = "%s: %s" % (domain, txt)
log(txt)
status_message(txt)
def error_traceback(domain, status_txt=''):
tb = traceback().strip()
if status_txt:
prefix = '%s\n' % status_txt
else:
prefix = ''
i = tb.rfind('\n')
if i > 0:
status_txt = tb[i:].strip()
else:
status_txt = tb
log("%s: %s%s" % (domain, prefix, tb))
status_message("%s: %s" % (domain, status_txt))
def notice_undo(domain, txt, view, should_undo):
def cb():
if should_undo:
view.run_command('undo')
notice(domain, txt)
sublime.set_timeout(cb, 0)
def show_output(domain, s, print_output=True, syntax_file='', replace=True, merge_domain=False, scroll_end=False):
def cb(domain, s, print_output, syntax_file):
panel_name = '%s-output' % domain
if merge_domain:
s = '%s: %s' % (domain, s)
if print_output:
println(s)
elif print_output:
println('%s: %s' % (domain, s))
win = sublime.active_window()
if win:
win.get_output_panel(panel_name).run_command('gs_set_output_panel_content', {
'content': s,
'syntax_file': syntax_file,
'scroll_end': scroll_end,
'replace': replace,
})
win.run_command("show_panel", {"panel": "output.%s" % panel_name})
sublime.set_timeout(lambda: cb(domain, s, print_output, syntax_file), 0)
def is_pkg_view(view=None):
# todo implement this fully
return is_go_source_view(view, False)
def is_go_source_view(view=None, strict=True):
if view is None:
return False
selector_match = view.score_selector(sel(view).begin(), 'source.go') > 0
if selector_match:
return True
if strict:
return False
fn = view.file_name() or ''
return fn.lower().endswith('.go')
def active_valid_go_view(win=None, strict=True):
if not win:
win = sublime.active_window()
if win:
view = win.active_view()
if view and is_go_source_view(view, strict):
return view
return None
def rowcol(view):
return view.rowcol(sel(view).begin())
def os_is_windows():
return os.name == "nt"
def getenv(name, default='', m={}):
return env(m).get(name, default)
def env(m={}):
"""
Assemble environment information needed for correct operation. In particular,
ensure that directories containing binaries are included in PATH.
"""
e = os.environ.copy()
e.update(environ9)
e.update(m)
roots = lst(e.get('GOPATH', '').split(os.pathsep), e.get('GOROOT', ''))
lfn = attr('last_active_go_fn', '')
comps = lfn.split(os.sep)
gs_gopath = []
for i, s in enumerate(comps):
if s.lower() == "src":
p = os.sep.join(comps[:i])
if p not in roots:
gs_gopath.append(p)
gs_gopath.reverse()
e['GS_GOPATH'] = os.pathsep.join(gs_gopath)
uenv = setting('env', {})
for k in uenv:
try:
uenv[k] = string.Template(uenv[k]).safe_substitute(e)
except Exception as ex:
println('%s: Cannot expand env var `%s`: %s' % (NAME, k, ex))
e.update(uenv)
e.update(m)
# For custom values of GOPATH, installed binaries via go install
# will go into the "bin" dir of the corresponding GOPATH path.
# Therefore, make sure these paths are included in PATH.
add_path = [home_dir_path('bin')]
for s in lst(e.get('GOROOT', ''), e.get('GOPATH', '').split(os.pathsep)):
if s:
s = os.path.join(s, 'bin')
if s not in add_path:
add_path.append(s)
gobin = e.get('GOBIN', '')
if gobin and gobin not in add_path:
add_path.append(gobin)
if os_is_windows():
l = [
'~\\bin',
'~\\go\\bin',
'C:\\Go\\bin',
]
else:
l = [
'~/bin',
'~/go/bin',
'/usr/local/go/bin',
'/usr/local/opt/go/bin',
'/usr/local/bin',
'/usr/bin',
]
for s in l:
s = os.path.expanduser(s)
if s not in add_path:
add_path.append(s)
for s in e.get('PATH', '').split(os.pathsep):
if s and s not in add_path:
add_path.append(s)
e['PATH'] = os.pathsep.join(add_path)
# Ensure no unicode objects leak through. The reason is twofold:
# * On Windows, Python 2.6 (used by Sublime Text) subprocess.Popen
# can only take bytestrings as environment variables in the
# "env" parameter. Reference:
# https://github.com/DisposaBoy/GoSublime/issues/112
# http://stackoverflow.com/q/12253014/1670
# * Avoids issues with networking too.
clean_env = {}
for k, v in e.items():
try:
clean_env[astr(k)] = astr(v)
except Exception as ex:
println('%s: Bad env: %s' % (NAME, ex))
return clean_env
def mirror_settings(so):
m = {}
for k in _default_settings:
v = so.get(k, None)
if v is not None:
ok = False
d = _default_settings[k]
if is_a(d, []):
if is_a(v, []):
ok = True
elif is_a(d, {}):
if is_a(v, []):
ok = True
else:
ok = True
m[k] = copy.copy(v)
return m
def sync_settings():
_settings.update(mirror_settings(settings_obj()))
def view_fn(view):
if view is not None:
if view.file_name():
return view.file_name()
return 'gs.view://%s' % view.id()
return ''
def view_src(view):
if view:
return view.substr(sublime.Region(0, view.size()))
return ''
def win_view(vfn=None, win=None):
if not win:
win = sublime.active_window()
view = None
if win:
m = VFN_ID_PAT.match(vfn or '')
if m:
try:
vid = int(m.group(1))
for v in win.views():
if v.id() == vid:
view = v
break
except Exception:
gs.error_traceback(NAME)
elif not vfn or vfn == "<stdin>":
view = win.active_view()
else:
view = win.open_file(vfn)
return (win, view)
def do_focus(fn, row, col, win, focus_pat, cb):
win, view = win_view(fn, win)
if win is None or view is None:
notify(NAME, 'Cannot find file position %s:%s:%s' % (fn, row, col))
if cb:
cb(False)
elif view.is_loading():
focus(fn, row=row, col=col, win=win, focus_pat=focus_pat, cb=cb)
else:
win.focus_view(view)
if row <= 0 and col <= 0 and focus_pat:
r = view.find(focus_pat, 0)
if r:
row, col = view.rowcol(r.begin())
view.run_command("gs_goto_row_col", { "row": row, "col": col })
if cb:
cb(True)
def focus(fn, row=0, col=0, win=None, timeout=100, focus_pat='^package ', cb=None):
sublime.set_timeout(lambda: do_focus(fn, row, col, win, focus_pat, cb), timeout)
def sm_cb():
global sm_text
global sm_set_text
global sm_frame
with sm_lck:
ntasks = len(sm_tasks)
tm = sm_tm
s = sm_text
if s:
delta = (datetime.datetime.now() - tm)
if delta.seconds >= 10:
sm_text = ''
if ntasks > 0:
if s:
s = u'%s, %s' % (sm_frames[sm_frame], s)
else:
s = u'%s' % sm_frames[sm_frame]
if ntasks > 1:
s = '%d %s' % (ntasks, s)
sm_frame = (sm_frame + 1) % len(sm_frames)
if s != sm_set_text:
sm_set_text = s
st2_status_message(s)
sched_sm_cb()
def sched_sm_cb():
sublime.set_timeout(sm_cb, 250)
def status_message(s):
global sm_text
global sm_tm
with sm_lck:
sm_text = s
sm_tm = datetime.datetime.now()
def begin(domain, message, set_status=True, cancel=None):
global sm_task_counter
if message and set_status:
status_message('%s: %s' % (domain, message))
with sm_lck:
sm_task_counter += 1
tid = 't%d' % sm_task_counter
sm_tasks[tid] = {
'start': datetime.datetime.now(),
'domain': domain,
'message': message,
'cancel': cancel,
}
return tid
def end(task_id):
with sm_lck:
try:
del(sm_tasks[task_id])
except:
pass
def task(task_id, default=None):
with sm_lck:
return sm_tasks.get(task_id, default)
def clear_tasks():
with sm_lck:
sm_tasks = {}
def task_list():
with sm_lck:
return sorted(sm_tasks.items())
def cancel_task(tid):
t = task(tid)
if t and t['cancel']:
s = 'are you sure you want to end task: #%s %s: %s' % (tid, t['domain'], t['message'])
if sublime.ok_cancel_dialog(s):
t['cancel']()
return True
return False
def show_quick_panel(items, cb=None):
def f():
win = sublime.active_window()
if win is not None:
if callable(cb):
f = lambda i: cb(i, win)
else:
f = lambda i: None
win.show_quick_panel(items, f, sublime.MONOSPACE_FONT)
sublime.set_timeout(f, 0)
def go_env_goroot():
out, _, _ = runcmd(['go env GOROOT'], shell=True)
return out.strip().encode('utf-8')
def list_dir_tree(dirname, filter, exclude_prefix=('.', '_')):
lst = []
try:
for fn in os.listdir(dirname):
if fn[0] in exclude_prefix:
continue
basename = fn.lower()
fn = os.path.join(dirname, fn)
if os.path.isdir(fn):
lst.extend(list_dir_tree(fn, filter, exclude_prefix))
else:
if filter:
pathname = fn.lower()
_, ext = os.path.splitext(basename)
ext = ext.lstrip('.')
if filter(pathname, basename, ext):
lst.append(fn)
else:
lst.append(fn)
except Exception:
pass
return lst
def traceback(domain='GoSublime'):
return '%s: %s' % (domain, tbck.format_exc())
def show_traceback(domain):
show_output(domain, traceback(), replace=False, merge_domain=False)
def maybe_unicode_str(s):
try:
return isinstance(s, unicode)
except NameError:
return isinstance(s, str)
def ustr(s):
if maybe_unicode_str(s):
return s
for e in try_encodings:
try:
return str_decode(s, e, 'strict')
except UnicodeDecodeError:
continue
return str_decode(s, 'utf-8', 'replace')
def astr(s):
if maybe_unicode_str(s):
if PY3K:
return s
return s.encode('utf-8')
return str(s)
def lst(*a):
l = []
for v in a:
if is_a([], v):
l.extend(v)
else:
l.append(v)
return l
def dval(v, d):
if v is not None:
if is_a_string(d) and is_a_string(v):
return v
if is_a(v, d):
return v
return d
def tm_path(name):
d = {
'9o': 'syntax/GoSublime-9o.tmLanguage',
'doc': 'GsDoc.hidden-tmLanguage',
'go': 'syntax/GoSublime-Go.tmLanguage',
'gohtml': 'syntax/GoSublime-HTML.tmLanguage',
}
try:
so = sublime.load_settings('GoSublime-next.sublime-settings')
if 'go' in so.get('extensions', []):
d['go'] = 'GoSublime-next.tmLanguage'
except Exception:
pass
return 'Packages/GoSublime/%s' % d[name]
def packages_dir():
fn = attr('gs.packages_dir')
if not fn:
fn = sublime.packages_path()
set_attr('gs.packages_dir', fn)
return fn
def dist_path(*a):
return os.path.join(packages_dir(), 'GoSublime', *a)
def mkdirp(fn):
try:
os.makedirs(fn)
except:
pass
def _home_path(*a):
return os.path.join(packages_dir(), 'User', 'GoSublime', about.PLATFORM, *a)
def home_dir_path(*a):
fn = _home_path(*a)
mkdirp(fn)
return fn
def home_path(*a):
fn = _home_path(*a)
mkdirp(os.path.dirname(fn))
return fn
def json_decode(s, default):
try:
res = json.loads(s)
if is_a(res, default):
return (res, '')
return (res, 'Unexpected value type')
except Exception as ex:
return (default, 'Decode Error: %s' % ex)
def json_encode(a):
try:
return (json.dumps(a), '')
except Exception as ex:
return ('', 'Encode Error: %s' % ex)
def attr(k, d=None):
with _attr_lck:
v = _attr.get(k, None)
return d if v is None else copy.copy(v)
def set_attr(k, v):
with _attr_lck:
_attr[k] = v
def del_attr(k):
with _attr_lck:
try:
v = _attr[k]
except Exception:
v = None
try:
del _attr[k]
except Exception:
pass
return v
# note: this functionality should not be used inside this module
# continue to use the try: X except: X=Y hack
def checked(domain, k):
with _checked_lck:
k = 'common.checked.%s.%s' % (domain, k)
v = _checked.get(k, False)
_checked[k] = True
return v
def sel(view, i=0):
try:
s = view.sel()
if s is not None and i < len(s):
return s[i]
except Exception:
pass
return sublime.Region(0, 0)
def which_ok(fn):
try:
return os.path.isfile(fn) and os.access(fn, os.X_OK)
except Exception:
return False
def which(cmd):
if os.path.isabs(cmd):
return cmd if which_ok(cmd) else ''
# not supporting PATHEXT. period.
if os_is_windows():
cmd = '%s.exe' % cmd
seen = {}
for p in getenv('PATH', '').split(os.pathsep):
p = os.path.join(p, cmd)
if p not in seen and which_ok(p):
return p
seen[p] = True
return ''
try:
st2_status_message
except:
sm_lck = threading.Lock()
sm_task_counter = 0
sm_tasks = {}
sm_frame = 0
sm_frames = (
u'\u25D2',
u'\u25D1',
u'\u25D3',
u'\u25D0'
)
sm_tm = datetime.datetime.now()
sm_text = ''
sm_set_text = ''
st2_status_message = sublime.status_message
sublime.status_message = status_message
DEVNULL = open(os.devnull, 'w')
LOGFILE = DEVNULL
try:
gs9o
except Exception:
gs9o = {}
def gs_init(m={}):
global LOGFILE
try:
LOGFILE = open(home_path('log.txt'), 'a+')
except Exception as ex:
LOGFILE = DEVNULL
notice(NAME, 'Cannot create log file. Remote(margo) and persistent logging will be disabled. Error: %s' % ex)
sched_sm_cb()
settings_obj().clear_on_change("GoSublime.settings")
settings_obj().add_on_change("GoSublime.settings", sync_settings)
sync_settings()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Neroburner
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import absolute_import
from __future__ import print_function
from __future__ import absolute_import
import os, sys
import logging
import config # user-credentials
from ext_libs.googleplay_api.googleplay import GooglePlayAPI #GooglePlayAPI
from ext_libs.googleplay_api.googleplay import LoginError
from ext_libs.androguard.core.bytecodes import apk as androguard_apk #Androguard
def connect():
api = GooglePlayAPI(androidId=config.ANDROID_ID, lang=config.LANG)
try :
api.login(config.GOOGLE_LOGIN, config.GOOGLE_PASSWORD, config.AUTH_TOKEN)
except LoginError, exc:
logging.error("Error: Connection to PlayStore failed: %s" % exc)
return None
logging.info("Info: Connection to GooglePlayStore established")
return api
def update(playstore_api, apk_folder_path):
# search for apks in given folder
list_of_apks = [filename for filename in os.listdir(apk_folder_path) if os.path.splitext(filename)[1] == ".apk"]
if len(list_of_apks) <= 0:
print("No apks found in folder %s" % apk_folder_path)
sys.exit(0)
# create a list of apks, just keep the newest
apks_to_update = dict()
for position, filename in enumerate(list_of_apks):
filepath = os.path.join(apk_folder_path, filename)
a = androguard_apk.APK(filepath)
apk_version_code = int(a.get_androidversion_code())
packagename = a.get_package()
logging.info("Info: Found apk %s : %s : %d" % (filepath, packagename, apk_version_code))
if packagename in apks_to_update:
if apks_to_update[packagename] < apk_version_code:
logging.info("Found newer local version %s : %d -> %d" % (packagename, apks_to_update[packagename], apk_version_code))
apks_to_update[packagename] = apk_version_code
else:
logging.info("Set new local apk %s : %d" % (packagename, apk_version_code))
apks_to_update[packagename] = apk_version_code
if len(apks_to_update) <= 0:
logging.error("Error: No apks to update after non-empty apk-list. Something went wrong!")
sys.exit(1)
# search for the apks on googleplaystore
for packagename, version_code in apks_to_update.items():
local_version_code = int(version_code)
logging.info("Info: Checking apk %s : %d" % (packagename, local_version_code))
m = playstore_api.details(packagename)
doc = m.docV2
store_version_code = int(doc.details.appDetails.versionCode)
if store_version_code == 0:
continue
if store_version_code > local_version_code:
# download apk from store
print("Updating apk %s : %d -> %d" % (packagename, local_version_code, store_version_code))
try:
data = playstore_api.download(packagename, store_version_code)
except Exception as exc:
logging.error("Error: failed to download %s : %s" % (packagename, exc))
continue
else:
# save downloaded apk under '<packagename>_<version>.apk'
filename = "%s_%d.apk" % (packagename, store_version_code)
filepath = os.path.join(apk_folder_path, filename)
try:
open(filepath, "wb").write(data)
except IOError, exc:
logging.error("Error: cannot write to disk %s : %s" % (packagename, exc))
continue
logging.info("Info: Downloaded apk %s : %d : %s" % (packagename, store_version_code, filename))
else:
logging.info("Info: No newer apk found.")
# call 'fdroid --clean'
def synopsis():
print("Usage: %s [-v] <apk_folder_path>" % sys.argv[0])
print("\t-v\t verbose output")
def main():
print(sys.argv)
min_argc = 2
path_index = 1
# Check arguments
if "-v" in sys.argv:
min_argc += 1
path_index += 1
logging.basicConfig(level=logging.INFO)
# TODO: --config flag
if len(sys.argv) < min_argc:
synopsis()
sys.exit(1)
# get apk_folder_path
apk_folder_path = sys.argv[path_index]
if not os.path.isdir(apk_folder_path):
print("Error: given <apk_folder_path> is not a directory: %s" % apk_folder_path)
synopsis()
sys.exit(1)
# connect to Google Play Store
playstore_api = connect()
if playstore_api == None:
print("Error: Connection to PlayStore failed. Check provided credencials in config.py")
sys.exit(1)
# update local apks
update(playstore_api, apk_folder_path)
if __name__ == '__main__':
main()
cleanup
- fix typos
- remove redundant 'info:' and 'error:' in logging messages
- comment code
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Neroburner
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import absolute_import
from __future__ import print_function
from __future__ import absolute_import
import os, sys
import logging
import config # user-credentials
from ext_libs.googleplay_api.googleplay import GooglePlayAPI #GooglePlayAPI
from ext_libs.googleplay_api.googleplay import LoginError
from ext_libs.androguard.core.bytecodes import apk as androguard_apk #Androguard
def connect():
api = GooglePlayAPI(androidId=config.ANDROID_ID, lang=config.LANG)
try :
api.login(config.GOOGLE_LOGIN, config.GOOGLE_PASSWORD, config.AUTH_TOKEN)
except LoginError, exc:
logging.error("Connection to PlayStore failed: %s" % exc)
return None
logging.info("Connection to GooglePlayStore established")
return api
def update(playstore_api, apk_folder_path):
# search for apks in given folder
list_of_apks = [filename for filename in os.listdir(apk_folder_path) if os.path.splitext(filename)[1] == ".apk"]
if len(list_of_apks) <= 0:
print("No apks found in folder %s" % apk_folder_path)
sys.exit(0)
# create a list of apks, just keep the newest
apks_to_update = dict()
for filename in list_of_apks:
filepath = os.path.join(apk_folder_path, filename)
a = androguard_apk.APK(filepath)
apk_version_code = int(a.get_androidversion_code())
packagename = a.get_package()
logging.info("Found apk %s : %s : %d" % (filepath, packagename, apk_version_code))
if packagename in apks_to_update:
if apks_to_update[packagename] < apk_version_code:
logging.info("Found newer local version %s : %d -> %d" % (packagename, apks_to_update[packagename], apk_version_code))
apks_to_update[packagename] = apk_version_code
else:
logging.info("Set new local apk %s : %d" % (packagename, apk_version_code))
apks_to_update[packagename] = apk_version_code
# are there still apks to check? If not something went wrong
if len(apks_to_update) <= 0:
logging.error("No apks to update after non-empty apk-list. Something went wrong!")
sys.exit(1)
# search for the apks on googleplaystore
for packagename, version_code in apks_to_update.items():
local_version_code = int(version_code)
logging.info("Checking apk %s : %d" % (packagename, local_version_code))
# get infos of the store-version
m = playstore_api.details(packagename)
doc = m.docV2
store_version_code = int(doc.details.appDetails.versionCode)
if store_version_code == 0:
logging.warning("Got store_version_code == 0 for package %s : %d" % (packagename, local_version_code))
continue
# check if there is an update
if store_version_code > local_version_code:
# download apk from store
print("Updating apk %s : %d -> %d" % (packagename, local_version_code, store_version_code))
try:
data = playstore_api.download(packagename, store_version_code)
except Exception as exc:
logging.error("failed to download %s : %s" % (packagename, exc))
continue
else:
# save downloaded apk under '<packagename>_<version>.apk'
filename = "%s_%d.apk" % (packagename, store_version_code)
filepath = os.path.join(apk_folder_path, filename)
try:
open(filepath, "wb").write(data)
except IOError, exc:
logging.error("cannot write to disk %s : %s" % (packagename, exc))
continue
logging.info("Downloaded apk %s : %d to file %s" % (packagename, store_version_code, filename))
else:
logging.info("No newer apk found.")
def synopsis():
print("Usage: %s [-v] <apk_folder_path>" % sys.argv[0])
print("\t-v\t verbose output")
def main():
#print(sys.argv)
min_argc = 2
path_index = 1
# Check arguments
if "-v" in sys.argv:
min_argc += 1
path_index += 1
logging.basicConfig(level=logging.INFO)
# TODO: --config flag
if len(sys.argv) < min_argc:
synopsis()
sys.exit(1)
# get apk_folder_path
apk_folder_path = sys.argv[path_index]
if not os.path.isdir(apk_folder_path):
logging.error("given <apk_folder_path> is not a directory: %s" % apk_folder_path)
synopsis()
sys.exit(1)
# connect to Google Play Store
playstore_api = connect()
if playstore_api == None:
logging.error("Connection to PlayStore failed. Check provided credencials in config.py")
sys.exit(1)
# update local apks
update(playstore_api, apk_folder_path)
if __name__ == '__main__':
main()
|
#Test file for function that control gpio pins on rpi
##import RPi.GPIO as GPIO #To interface with Raspberry Pis GPIO pins
import time #To sleep program
#Get pin to test on
pin = input("Pin to interface with: ")
done = False
while not done:
#Get time in milliseconds to run pump
pump_time = input("Time to pump (in milliseconds): ")
#Check for done or exit
if (pump_time == "done") or (pump_time == "exit"):
done = True
#Otherwise run pump
else:
#Setup output pin, default turn on is low voltage
##GPIO.setup(pin, GPIO.OUT)
#Set GPIO pin to high voltage
#Should turn pump on
##GPIO.output(pin, True)
#Wait specified time before turning off
time.sleep(int(pump_time))
#Set GPIO pin to low voltage
#Should turn pump off
##GPIO.output(pin, False)
GPIO test file fully functional
#Test file for function that control gpio pins on rpi
import RPi.GPIO as GPIO #To interface with Raspberry Pis GPIO pins
import time #To sleep program
#Get pin to test on
pin = input("Pin to interface with: ")
done = False
while not done:
#Get time in seconds to run pump
pump_time = input("Time to pump (in seconds): ")
#Check for done or exit
if (pump_time == "done") or (pump_time == "exit"):
done = True
#Otherwise run pump
else:
#Setup output pin, default turn on is low voltage
GPIO.setup(pin, GPIO.OUT)
#Set GPIO pin to high voltage
#Should turn pump on
GPIO.output(pin, True)
#Wait specified time before turning off
time.sleep(int(pump_time))
#Set GPIO pin to low voltage
#Should turn pump off
GPIO.output(pin, False) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.