| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
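Each row of the table pairs a source file (the text column, printed in full below) with its repository metadata. A minimal sketch of consuming rows like these with the HuggingFace datasets library follows; the hub path "user/code-with-keywords" is a hypothetical placeholder, not this dataset's real name:

from datasets import load_dataset

# hypothetical hub path, for illustration only
ds = load_dataset("user/code-with-keywords", split="train")
for row in ds:
    # each record carries the file text plus its provenance metadata
    print(row["repo_name"], row["path"], row["license"], row["size"])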
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import cache_page
from coredata.forms import RoleForm, UnitRoleForm, InstrRoleFormSet, MemberForm, PersonForm, TAForm, \
UnitAddressForm, UnitForm, SemesterForm, SemesterWeekFormset, HolidayFormset, SysAdminSearchForm, \
TemporaryPersonForm, CourseHomePageForm, OneOfferingForm, NewCombinedForm, AnyPersonForm, RoleAccountForm, \
OffboardForm
from courselib.auth import requires_global_role, requires_role, requires_course_staff_by_slug, ForbiddenResponse, \
has_formgroup, has_global_role
from courselib.search import get_query, find_userid_or_emplid
from coredata.models import Person, Semester, CourseOffering, Course, Member, Role, Unit, SemesterWeek, Holiday, \
AnyPerson, FuturePerson, RoleAccount, CombinedOffering, UNIT_ROLES, ROLES, ROLE_DESCR, INSTR_ROLES
from coredata import panel
from advisornotes.models import NonStudent
from onlineforms.models import FormGroup, FormGroupMember
from log.models import LogEntry
from coredata.models import LONG_LIVED_ROLES
from django.urls import reverse
from django.contrib import messages
from cache_utils.decorators import cached
from haystack.query import SearchQuerySet
import socket, json, datetime, os
import iso8601
from functools import reduce
from operator import itemgetter
@requires_global_role("SYSA")
def sysadmin(request):
if 'usersearch' in request.GET:
# user search
form = SysAdminSearchForm(request.GET)
if form.is_valid() and form.cleaned_data['user']:
emplid = form.cleaned_data['user'].emplid
return HttpResponseRedirect(reverse('sysadmin:user_summary', kwargs={'userid': emplid}))
elif 'offeringsearch' in request.GET:
# course offering search
form = SysAdminSearchForm(request.GET)
if form.is_valid() and form.cleaned_data['offering']:
offering = form.cleaned_data['offering']
return HttpResponseRedirect(reverse('sysadmin:offering_summary', kwargs={'course_slug': offering.slug}))
else:
form = SysAdminSearchForm()
return render(request, 'coredata/sysadmin.html', {'form': form})
@requires_global_role("SYSA")
def role_list(request):
"""
Display list of who has what role
"""
roles = Role.objects_fresh.exclude(role="NONE").select_related('person', 'unit')
return render(request, 'coredata/roles.html', {'roles': roles})
@requires_global_role("SYSA")
def expired_role_list(request):
"""
Display long-lived roles that have accidentally expired
"""
roles = Role.objects.filter(role__in=LONG_LIVED_ROLES, expiry__lt=datetime.date.today()).select_related('person', 'unit')
return render(request, 'coredata/expired_roles.html', {'roles': roles})
@requires_global_role("SYSA")
def new_role(request, role=None):
if request.method == 'POST':
form = RoleForm(request.POST)
if form.is_valid():
r = form.save(commit=False)
r.config['giver'] = request.user.username
r.config['given_date'] = datetime.date.today().isoformat()
r.save()
messages.success(request, 'Added role %s for %s.' % (form.instance.get_role_display(), form.instance.person.name()))
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("new role: %s as %s") % (form.instance.person.userid, form.instance.role),
related_object=form.instance)
l.save()
return HttpResponseRedirect(reverse('sysadmin:role_list'))
else:
form = RoleForm(initial={'expiry': datetime.date.today() + datetime.timedelta(days=365)})
return render(request, 'coredata/new_role.html', {'form': form})
@requires_global_role("SYSA")
def renew_role(request, role_id):
if request.method != 'POST':
return ForbiddenResponse(request)
role = get_object_or_404(Role, pk=role_id)
new_exp = datetime.date.today() + datetime.timedelta(days=365)
role.expiry = new_exp
role.save()
messages.success(request, 'Renewed role for %s until %s.' % (role.person.name(), new_exp))
# LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("renewed role: %s for %s in %s until %s") % (
role.get_role_display(), role.person.name(), role.unit, new_exp),
related_object=role.person)
l.save()
return HttpResponseRedirect(reverse('sysadmin:role_list'))
@requires_global_role("SYSA")
def delete_role(request, role_id):
role = get_object_or_404(Role, pk=role_id)
messages.success(request, 'Deleted role %s for %s.' % (role.get_role_display(), role.person.name()))
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("deleted role: %s for %s") % (role.get_role_display(), role.person.name()),
related_object=role.person)
l.save()
role.delete()
return HttpResponseRedirect(reverse('sysadmin:role_list'))
@requires_global_role("SYSA")
def unit_list(request):
"""
Display list of all units
"""
units = Unit.objects.all()
return render(request, 'coredata/units.html', {'units': units})
@requires_global_role("SYSA")
def edit_unit(request, unit_slug=None):
if unit_slug:
unit = get_object_or_404(Unit, slug=unit_slug)
else:
unit = Unit()
if request.method == 'POST':
form = UnitForm(instance=unit, data=request.POST)
if form.is_valid():
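            # reset the slug to None so the auto-slug field regenerates it from the updated name on save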
unit.slug = None
form.save()
messages.success(request, 'Edited unit %s.' % (unit.name))
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("edited unit %s") % (form.instance.slug),
related_object=unit)
l.save()
return HttpResponseRedirect(reverse('sysadmin:unit_list'))
else:
form = UnitForm(instance=unit)
context = {'form': form}
return render(request, 'coredata/edit_unit.html', context)
@requires_global_role("SYSA")
def members_list(request):
members = Member.objects.exclude(added_reason="AUTO").exclude(added_reason="CTA").exclude(added_reason="TAC") \
.select_related('offering__semester')
return render(request, 'coredata/members_list.html', {'members': members})
@requires_global_role("SYSA")
def edit_member(request, member_id=None):
if member_id:
member = get_object_or_404(Member, id=member_id)
else:
member = None
if request.method == 'POST':
form = MemberForm(request.POST, instance=member)
if form.is_valid():
form.save()
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("edited membership: %s as %s in %s") % (form.instance.person.userid, form.instance.role, form.instance.offering),
related_object=form.instance)
l.save()
return HttpResponseRedirect(reverse('sysadmin:members_list'))
elif member_id:
form = MemberForm(instance=member, initial={'person': member.person.userid})
else:
form = MemberForm()
return render(request, 'coredata/edit_member.html', {'form': form, 'member': member})
@requires_global_role("SYSA")
def user_summary(request, userid):
query = find_userid_or_emplid(userid)
person = get_object_or_404(Person, query)
if request.method == 'POST':
from coredata.importer import import_person
grad_data = 'import-grad' in request.POST
person = import_person(person, commit=True, grad_data=grad_data)
messages.success(request, 'Imported SIMS data for %s.' % (person.userid_or_emplid()))
memberships = Member.objects.filter(person=person)
roles = Role.objects_fresh.filter(person=person).exclude(role="NONE").select_related('unit')
context = {'person': person, 'memberships': memberships, 'roles': roles}
return render(request, "coredata/user_summary.html", context)
@requires_global_role("SYSA")
def user_config(request, userid):
query = find_userid_or_emplid(userid)
person = get_object_or_404(Person, query)
return render(request, "coredata/user_config.html", {'person': person})
@requires_global_role("SYSA")
def offering_summary(request, course_slug):
offering = get_object_or_404(CourseOffering, slug=course_slug)
staff = Member.objects.filter(offering=offering, role__in=['INST', 'TA'])
context = {'offering': offering, 'staff': staff}
return render(request, "coredata/offering_summary.html", context)
@requires_global_role("SYSA")
def new_person(request):
if request.method == 'POST':
form = PersonForm(request.POST)
if form.is_valid():
form.save()
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("new person added: %s (%s)") % (form.instance.name(), form.instance.userid),
related_object=form.instance)
l.save()
return HttpResponseRedirect(reverse('sysadmin:sysadmin'))
else:
form = PersonForm()
return render(request, 'coredata/new_person.html', {'form': form})
# semester object management
@requires_global_role("SYSA")
def semester_list(request):
semesters = Semester.objects.all()
return render(request, 'coredata/semester_list.html', {'semesters': semesters})
@requires_global_role("SYSA")
def edit_semester(request, semester_name=None):
if semester_name:
semester = get_object_or_404(Semester, name=semester_name)
newsem = False
else:
semester = Semester()
newsem = True
if request.method == 'POST':
form = SemesterForm(instance=semester, prefix='sem', data=request.POST)
week_formset = SemesterWeekFormset(queryset=SemesterWeek.objects.filter(semester=semester), prefix='week', data=request.POST)
holiday_formset = HolidayFormset(queryset=Holiday.objects.filter(semester=semester), prefix='holiday', data=request.POST)
if form.is_valid() and week_formset.is_valid() and holiday_formset.is_valid():
sem = form.save()
weeks = week_formset.save(commit=False)
for week in weeks:
week.semester = sem
week.save()
holidays = holiday_formset.save(commit=False)
for holiday in holidays:
holiday.semester = sem
holiday.save()
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("edited semester %s") % (sem.name),
related_object=sem)
l.save()
messages.success(request, 'Edited semester %s.' % (sem.name))
return HttpResponseRedirect(reverse('sysadmin:semester_list', kwargs={}))
else:
form = SemesterForm(instance=semester, prefix='sem')
week_formset = SemesterWeekFormset(queryset=SemesterWeek.objects.filter(semester=semester), prefix='week')
holiday_formset = HolidayFormset(queryset=Holiday.objects.filter(semester=semester), prefix='holiday')
context = {'semester': semester, 'form': form, 'newsem': newsem,
'week_formset': week_formset, 'holiday_formset': holiday_formset}
return render(request, 'coredata/edit_semester.html', context)
# combined sections admin
@requires_global_role("SYSA")
def combined_offerings(request):
combined = CombinedOffering.objects.all()
new_form = OneOfferingForm()
context = {
'combined': combined,
'new_form': new_form,
}
return render(request, 'coredata/combined_offerings.html', context)
def _new_fake_class_nbr(semester):
# largest class_nbr in production is 47348. Assuming that >65536 can be reserved as fakes.
from django.db.models import Max
max_offering = CourseOffering.objects.filter(semester=semester).aggregate(Max('class_nbr'))['class_nbr__max']
max_combined = CombinedOffering.objects.filter(semester=semester).aggregate(Max('class_nbr'))['class_nbr__max']
nbr = 65536
if max_offering:
nbr = max(nbr, max_offering)
if max_combined:
nbr = max(nbr, max_combined)
return nbr+1
@requires_global_role("SYSA")
def new_combined(request):
offering_id = request.GET.get('offering', None)
offering = get_object_or_404(CourseOffering, id=offering_id)
if request.method == 'POST':
form = NewCombinedForm(request.POST)
if form.is_valid():
combined = form.save(commit=False)
combined.semester = offering.semester
combined.crse_id = offering.crse_id
combined.class_nbr = _new_fake_class_nbr(combined.semester)
combined.save()
combined.offerings.add(offering)
combined.create_combined_offering()
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("created combined offering %i with %s") % (combined.id, offering.slug),
related_object=combined)
l.save()
messages.success(request, 'Created combined offering.')
return HttpResponseRedirect(reverse('sysadmin:combined_offerings', kwargs={}))
else:
# set up creation form from the offering given
initial = {
'subject': offering.subject,
'number': offering.number,
'section': 'X100',
'component': offering.component,
'instr_mode': offering.instr_mode,
'owner': offering.owner,
'title': offering.title,
'campus': offering.campus,
}
form = NewCombinedForm(initial=initial)
context = {
'form': form,
}
return render(request, 'coredata/edit_combined.html', context)
@requires_global_role("SYSA")
def add_combined_offering(request, pk):
combined = get_object_or_404(CombinedOffering, pk=pk)
if request.method == 'POST':
form = OneOfferingForm(request.POST)
if form.is_valid():
offering = form.cleaned_data['offering']
if offering in combined.offerings.all():
messages.error(request, 'That offering is already in the combined section.')
else:
combined.offerings.add(offering)
combined.create_combined_offering()
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("added %s to combined offering %i") % (offering.slug, combined.id),
related_object=combined)
l.save()
messages.success(request, 'Added offering.')
return HttpResponseRedirect(reverse('sysadmin:combined_offerings', kwargs={}))
else:
form = OneOfferingForm()
context = {
'form': form,
}
return render(request, 'coredata/add_combined_offering.html', context)
@requires_global_role("SYSA")
def admin_panel(request):
if 'content' in request.GET:
if request.GET['content'] == 'deploy_checks':
passed, failed = panel.deploy_checks(request=request)
return render(request, 'coredata/admin_panel_tab.html', {'passed': passed, 'failed': failed})
elif request.GET['content'] == 'settings_info':
data = panel.settings_info()
return render(request, 'coredata/admin_panel_tab.html', {'settings_data': data})
elif request.GET['content'] == 'psinfo':
data = panel.ps_info()
return render(request, 'coredata/admin_panel_tab.html', {'psinfo': data})
elif request.GET['content'] == 'email':
user = Person.objects.get(userid=request.user.username)
return render(request, 'coredata/admin_panel_tab.html', {'email': user.email()})
elif request.GET['content'] == 'celery':
data = panel.celery_info()
return render(request, 'coredata/admin_panel_tab.html', {'celery': data})
elif request.GET['content'] == 'tasks':
return render(request, 'coredata/admin_panel_tab.html', {'tasks': True})
elif request.GET['content'] == 'request':
import pprint
return render(request, 'coredata/admin_panel_tab.html', {'the_request': pprint.pformat(request.__dict__)})
elif request.GET['content'] == 'git':
git = {}
git['branch'] = panel.git_branch().decode('utf8')
git['revision'] = panel.git_revision().decode('utf8')
return render(request, 'coredata/admin_panel_tab.html', {'git':git})
elif request.GET['content'] == 'pip':
data = panel.pip_info()
return render(request, 'coredata/admin_panel_tab.html', {'pip': data})
elif request.GET['content'] == 'csrpt':
data = panel.csrpt_info()
return render(request, 'coredata/admin_panel_tab.html', {'csrpt': data})
elif request.GET['content'] == 'environ':
            environ = sorted(os.environ.items())
return render(request, 'coredata/admin_panel_tab.html', {'environ': environ})
elif request.GET['content'] == 'throw':
raise RuntimeError('This is a deliberately-thrown exception to test exception-handling in the system. It can be ignored.')
elif request.GET['content'] == 'slow':
import time
t = int(request.GET.get('t', '25'))
time.sleep(t)
resp = render(request, 'coredata/admin_panel_tab.html', {})
if 'okay' in request.GET:
resp.slow_okay = True
return resp
elif request.method == 'POST':
if 'email' in request.POST:
email = request.POST['email']
success, res = panel.send_test_email(email)
if success:
messages.success(request, res)
else:
messages.error(request, res)
elif 'tasks' in request.POST:
if 'daily' in request.POST:
from coredata.tasks import import_task
import_task.apply_async()
messages.success(request, 'Daily import task started.')
elif 'visits' in request.POST:
from advisornotes.tasks import program_info_for_advisorvisits
program_info_for_advisorvisits.apply_async()
messages.success(request, 'Advisor visit task started.')
elif 'grad' in request.POST:
from coredata.tasks import import_grads
from grad.tasks import update_statuses_to_current
update_statuses_to_current.apply_async()
import_grads.apply_async()
messages.success(request, 'Grad update and import tasks started.')
context = {
'loadavg': os.getloadavg()
}
return render(request, 'coredata/admin_panel.html', context)
# Methods for managing AnyPersons
@requires_global_role("SYSA")
def list_anypersons(request):
anypersons = AnyPerson.objects.all()
context = {'anypersons': anypersons}
return render(request, 'coredata/any_persons.html', context)
@requires_global_role("SYSA")
def delete_anyperson(request, anyperson_id):
anyperson = get_object_or_404(AnyPerson, pk=anyperson_id)
if request.method == 'POST':
anyperson.delete()
messages.success(request, 'Deleted anyperson for %s' % anyperson)
l = LogEntry(userid=request.user.username,
description="deleted anyperson: %s" % anyperson,
related_object=anyperson)
l.save()
return HttpResponseRedirect(reverse('sysadmin:list_anypersons'))
@requires_global_role("SYSA")
def add_anyperson(request):
if request.method == 'POST':
form = AnyPersonForm(request.POST)
if form.is_valid():
ap = form.save()
messages.add_message(request,
messages.SUCCESS,
'AnyPerson %s was created.' % ap
)
l = LogEntry(userid=request.user.username,
description="added anyperson: %s" % ap,
related_object=ap
)
l.save()
return HttpResponseRedirect(reverse('sysadmin:list_anypersons'))
else:
form = AnyPersonForm()
return render(request, 'coredata/new_anyperson.html', {'form': form})
@requires_global_role("SYSA")
def edit_anyperson(request, anyperson_id):
anyperson = get_object_or_404(AnyPerson, pk=anyperson_id)
if request.method == 'POST':
form = AnyPersonForm(request.POST, instance=anyperson)
if form.is_valid():
ap = form.save()
messages.add_message(request,
messages.SUCCESS,
'AnyPerson for %s was edited.' % ap
)
l = LogEntry(userid=request.user.username,
description="edited anyperson: %s" % ap,
related_object=ap
)
l.save()
return HttpResponseRedirect(reverse('sysadmin:list_anypersons'))
else:
initial_values = {}
if anyperson.person:
initial_values['person'] = anyperson.person.emplid
if anyperson.future_person:
initial_values['future_person'] = anyperson.future_person_id
if anyperson.role_account:
initial_values['role_account'] = anyperson.role_account_id
form = AnyPersonForm(instance=anyperson, initial=initial_values)
return render(request, 'coredata/edit_anyperson.html', {'form': form, 'anyperson_id': anyperson_id})
@requires_global_role("SYSA")
def delete_empty_anypersons(request):
if request.method == 'POST':
res = AnyPerson.delete_empty_anypersons()
messages.add_message(request,
messages.SUCCESS,
                             'Deleted %s empty AnyPerson(s).' % str(res)
)
return HttpResponseRedirect(reverse('sysadmin:list_anypersons'))
@requires_global_role("SYSA")
def list_futurepersons(request):
futurepersons = FuturePerson.objects.all()
context = {'futurepersons': futurepersons}
return render(request, 'coredata/future_persons.html', context)
@requires_global_role("SYSA")
def edit_futureperson(request, futureperson_id):
return HttpResponseRedirect(reverse('faculty:edit_futureperson', kwargs={'futureperson_id': futureperson_id,
'from_admin': 1}))
@requires_global_role("SYSA")
def delete_futureperson(request, futureperson_id):
if request.method == 'POST':
futureperson = FuturePerson.objects.get(pk=futureperson_id)
futureperson.delete()
messages.success(request, 'Deleted futureperson %s' % futureperson)
l = LogEntry(userid=request.user.username,
description="deleted futureperson: %s" % futureperson,
related_object=futureperson)
l.save()
return HttpResponseRedirect(reverse('sysadmin:list_futurepersons'))
@requires_global_role("SYSA")
def add_futureperson(request):
from faculty.forms import FuturePersonForm
if request.method == 'POST':
form = FuturePersonForm(request.POST)
if form.is_valid():
new_future_person = form.save(commit=False)
new_future_person.set_email(form.cleaned_data.get('email'))
new_future_person.set_gender(form.cleaned_data.get('gender'))
new_future_person.set_sin(form.cleaned_data.get('sin'))
new_future_person.set_birthdate(form.cleaned_data.get('birthdate'))
new_future_person.save()
messages.add_message(request,
messages.SUCCESS,
                             'FuturePerson %s was created.' % new_future_person
)
l = LogEntry(userid=request.user.username,
description="Added FuturePerson: %s" % new_future_person,
related_object=new_future_person
)
l.save()
return HttpResponseRedirect(reverse('sysadmin:list_futurepersons'))
else:
form = FuturePersonForm()
return render(request, 'coredata/new_futureperson.html', {'form': form})
@requires_global_role("SYSA")
def view_futureperson(request, futureperson_id):
return HttpResponseRedirect(reverse('faculty:view_futureperson', kwargs={'futureperson_id': futureperson_id,
'from_admin': 1}))
@requires_global_role("SYSA")
def list_roleaccounts(request):
roleaccounts = RoleAccount.objects.all()
context = {'roleaccounts': roleaccounts}
return render(request, 'coredata/role_accounts.html', context)
@requires_global_role("SYSA")
def delete_roleaccount(request, roleaccount_id):
roleaccount = RoleAccount.objects.get(pk=roleaccount_id)
if request.method == 'POST':
roleaccount.delete()
messages.success(request, 'Deleted roleaccount %s' % roleaccount)
l = LogEntry(userid=request.user.username,
description="deleted roleaccount: %s" % roleaccount,
related_object=roleaccount)
l.save()
return HttpResponseRedirect(reverse('sysadmin:list_roleaccounts'))
@requires_global_role("SYSA")
def edit_roleaccount(request, roleaccount_id):
roleaccount = get_object_or_404(RoleAccount, pk=roleaccount_id)
if request.method == 'POST':
form = RoleAccountForm(request.POST, instance=roleaccount)
if form.is_valid():
ra = form.save()
messages.add_message(request,
messages.SUCCESS,
'Role Account %s was edited.' % ra
)
l = LogEntry(userid=request.user.username,
description="edited roleaccount: %s" % ra,
related_object=ra
)
l.save()
return HttpResponseRedirect(reverse('sysadmin:list_roleaccounts'))
else:
form = RoleAccountForm(instance=roleaccount)
return render(request, 'coredata/edit_roleaccount.html', {'form': form, 'roleaccount_id': roleaccount_id})
@requires_global_role("SYSA")
def add_roleaccount(request):
if request.method == 'POST':
form = RoleAccountForm(request.POST)
if form.is_valid():
ra = form.save()
messages.add_message(request,
messages.SUCCESS,
'added roleaccount %s' % ra
)
l = LogEntry(userid=request.user.username,
description="added roleaccount: %s" % ra,
related_object=ra
)
l.save()
return HttpResponseRedirect(reverse('sysadmin:list_roleaccounts'))
else:
form = RoleAccountForm()
return render(request, 'coredata/new_roleaccount.html', {'form': form})
# views to let instructors manage TAs
@requires_course_staff_by_slug
def manage_tas(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
longform = False
if not Member.objects.filter(offering=course, person__userid=request.user.username, role="INST"):
# only instructors can manage TAs
return ForbiddenResponse(request, "Only instructors can manage TAs")
if request.method == 'POST' and 'action' in request.POST and request.POST['action']=='add':
form = TAForm(offering=course, data=request.POST)
if form.non_field_errors():
# have an unknown userid
longform = True
elif form.is_valid():
userid = form.cleaned_data['userid']
if not Person.objects.filter(userid=userid) \
and form.cleaned_data['fname'] and form.cleaned_data['lname']:
# adding a new person: handle that.
eid = 1
# search for an unused temp emplid
while True:
emplid = "%09i" % (eid)
if not Person.objects.filter(emplid=emplid):
break
eid += 1
p = Person(first_name=form.cleaned_data['fname'], pref_first_name=form.cleaned_data['fname'], last_name=form.cleaned_data['lname'], middle_name='', userid=userid, emplid=emplid)
p.save()
else:
p = Person.objects.get(userid=userid)
m = Member(person=p, offering=course, role="TA", credits=0, career="NONS", added_reason="TAIN")
m.save()
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("TA added by instructor: %s for %s") % (userid, course),
related_object=m)
l.save()
messages.success(request, 'Added %s as a TA.' % (p.name()))
return HttpResponseRedirect(reverse('offering:manage_tas', kwargs={'course_slug': course.slug}))
elif request.method == 'POST' and 'action' in request.POST and request.POST['action']=='del':
userid = request.POST['userid']
ms = Member.objects.filter(person__userid=userid, offering=course, role="TA", added_reason="TAIN")
if ms:
m = ms[0]
m.role = "DROP"
m.save()
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("TA removed by instructor: %s for %s") % (userid, course),
related_object=m)
l.save()
messages.success(request, 'Removed %s as a TA.' % (m.person.name()))
return HttpResponseRedirect(reverse('offering:manage_tas', kwargs={'course_slug': course.slug}))
else:
form = TAForm(offering=course)
tas = Member.objects.filter(role="TA", offering=course)
context = {'course': course, 'form': form, 'tas': tas, 'longform': longform}
return render(request, 'coredata/manage_tas.html', context)
# views for departmental admins to manage permissions
@requires_role("ADMN")
def unit_admin(request):
"""
Unit admin front page
"""
return render(request, 'coredata/unit_admin.html', {'units': Unit.sub_units(request.units)})
@requires_role("ADMN")
def unit_role_list(request):
"""
Display list of who has what role (for department admins)
"""
roles = Role.objects_fresh.filter(unit__in=Unit.sub_units(request.units), role__in=UNIT_ROLES)
return render(request, 'coredata/unit_roles.html', {'roles': roles})
@requires_role("ADMN")
def new_unit_role(request):
role_choices = [(r,ROLES[r]) for r in UNIT_ROLES]
# Make the form more readable by sorting by role long name.
role_choices.sort(key=itemgetter(1))
unit_choices = [(u.id, str(u)) for u in Unit.sub_units(request.units)]
if request.method == 'POST':
form = UnitRoleForm(request.POST)
form.fields['role'].choices = role_choices
form.fields['unit'].choices = unit_choices
if form.is_valid():
r = form.save(commit=False)
r.config['giver'] = request.user.username
r.config['given_date'] = datetime.date.today().isoformat()
r.save()
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("new role: %s as %s in %s") % (form.instance.person.userid, form.instance.role, form.instance.unit),
related_object=form.instance)
l.save()
messages.success(request, "Added role: %s as %s in %s." % (r.person, r.get_role_display(), r.unit.name))
return HttpResponseRedirect(reverse('admin:unit_role_list'))
else:
form = UnitRoleForm(initial={'expiry': datetime.date.today() + datetime.timedelta(days=365)})
form.fields['role'].choices = role_choices
form.fields['unit'].choices = unit_choices
context = {'form': form, 'UNIT_ROLES': UNIT_ROLES, 'ROLE_DESCR': ROLE_DESCR}
return render(request, 'coredata/new_unit_role.html', context)
@requires_role("ADMN")
def offboard_unit(request):
if request.method == 'POST':
form = OffboardForm(request.POST)
if form.is_valid():
person = form.cleaned_data['person']
delete_roles = form.cleaned_data['delete_roles']
delete_formgroups = form.cleaned_data['delete_formgroups']
roles = Role.objects_fresh.filter(person=person, unit__in=Unit.sub_units(request.units),
role__in=UNIT_ROLES)
groups = FormGroup.objects.filter(members=person, unit__in=Unit.sub_units(request.units))
if delete_roles:
for role in roles:
role.delete()
l = LogEntry(userid=request.user.username,
description=("Deleted role: %s in %s via offboarding form.") % (role, role.unit),
related_object=role)
l.save()
messages.success(request, "Removed role %s as %s in %s." % (person, role.get_role_display(), role.unit.label))
if delete_formgroups:
for group in groups:
member = FormGroupMember.objects.get(person=person, formgroup=group)
member.delete()
l = LogEntry(userid=request.user.username,
description=("Removed %s from form group %s (%i) via offboarding form.") % (
person.userid_or_emplid(), group, group.id),
related_object=group)
l.save()
messages.success(request, "Removed %s from formgroup %s" % (person, group))
return HttpResponseRedirect(reverse('admin:unit_role_list'))
else:
form = OffboardForm()
return render(request, 'coredata/offboard_unit.html', {'form': form})
@requires_role("ADMN")
def roles(request, emplid):
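    # responds with a JSON object describing the person's unit roles and form
    # groups, e.g. {"roles": ["Advisor in ..."], "formgroups": ["..."]}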
person = get_object_or_404(Person, emplid=emplid)
roles = Role.objects_fresh.filter(person=person, unit__in=Unit.sub_units(request.units), role__in=UNIT_ROLES)
groups = FormGroup.objects.filter(members=person, unit__in=Unit.sub_units(request.units))
data = {}
if roles:
data['roles'] = []
if groups:
data['formgroups'] = []
for role in roles:
data['roles'].append('%s in %s' % (role.get_role_display(), role.unit.name))
for group in groups:
data['formgroups'].append(str(group))
response = HttpResponse(content_type='application/json')
json.dump(data, response, indent=1)
return response
@requires_role("ADMN")
def renew_unit_roles_list(request):
"""
Display list of who has what role
"""
allow_renewal = datetime.timedelta(days=182)+datetime.date.today()
roles = Role.objects_fresh.filter(unit__in=Unit.sub_units(request.units), role__in=UNIT_ROLES, expiry__lt=allow_renewal)
return render(request, 'coredata/renew_unit_roles.html', {'roles': roles})
@requires_role("ADMN")
def renew_unit_roles(request, id=None):
"""
Renew Multiple Roles
"""
if request.method == 'POST':
to_renew = request.POST.getlist('renewals')
        if not to_renew:
messages.error(request, 'Please select at least one role to renew.')
else:
for role_id in to_renew:
role = get_object_or_404(Role, pk=role_id, unit__in=Unit.sub_units(request.units), role__in=UNIT_ROLES)
new_exp = datetime.date.today() + datetime.timedelta(days=365)
role.expiry = new_exp
role.save()
messages.success(request, 'Renewed role for %s until %s.' % (role.person.name(), new_exp))
# LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("renewed role: %s for %s in %s until %s") % (
role.get_role_display(), role.person.name(), role.unit, new_exp),
related_object=role.person)
l.save()
return HttpResponseRedirect(reverse('admin:renew_unit_roles_list'))
@requires_role("ADMN")
def renew_unit_role(request, role_id):
if request.method != 'POST':
return ForbiddenResponse(request)
role = get_object_or_404(Role, pk=role_id, unit__in=Unit.sub_units(request.units), role__in=UNIT_ROLES)
new_exp = datetime.date.today() + datetime.timedelta(days=365)
role.expiry = new_exp
role.save()
messages.success(request, 'Renewed role for %s until %s.' % (role.person.name(), new_exp))
# LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("renewed role: %s for %s in %s until %s") % (
role.get_role_display(), role.person.name(), role.unit, new_exp),
related_object=role.person)
l.save()
return HttpResponseRedirect(reverse('admin:unit_role_list'))
@requires_role("ADMN")
def delete_unit_role(request, role_id):
if request.method != 'POST':
return ForbiddenResponse(request)
role = get_object_or_404(Role, pk=role_id, unit__in=Unit.sub_units(request.units), role__in=UNIT_ROLES)
messages.success(request, 'Deleted role %s for %s.' % (role.get_role_display(), role.person.name()))
# LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("deleted role: %s for %s in %s") % (
role.get_role_display(), role.person.name(), role.unit),
related_object=role.person)
l.save()
role.delete()
return HttpResponseRedirect(reverse('admin:unit_role_list'))
@requires_role('ADMN')
def unit_address(request, unit_slug):
unit = get_object_or_404(Unit, slug=unit_slug)
if unit not in Unit.sub_units(request.units):
return ForbiddenResponse(request, "Not an admin for this unit")
if request.method == 'POST':
form = UnitAddressForm(data=request.POST, unit=unit)
if form.is_valid():
#print form.cleaned_data
form.copy_to_unit()
unit.save()
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("updated contact info for %s") % (unit.label),
related_object=unit)
l.save()
return HttpResponseRedirect(reverse('admin:unit_admin'))
else:
form = UnitAddressForm(unit=unit)
context = {'unit': unit, 'form': form}
return render(request, "coredata/unit_address.html", context)
@requires_role('ADMN')
def missing_instructors(request, unit_slug):
unit = get_object_or_404(Unit, slug=unit_slug)
if unit not in Unit.sub_units(request.units):
return ForbiddenResponse(request, "Not an admin for this unit")
# build a set of all instructors that don't have an instructor-appropriate role
roles = dict(((r.person, r.role) for r in Role.objects.filter(unit=unit, role__in=INSTR_ROLES).select_related('person')))
missing = set()
long_ago = datetime.date.today() - datetime.timedelta(days=365*3)
instructors = Member.objects.filter(role="INST", offering__owner=unit,
offering__semester__start__gte=long_ago) \
.exclude(offering__component='CAN') \
.exclude(person__userid=None) \
.select_related('person')
for i in instructors:
if i.person not in roles:
missing.add(i.person)
missing = list(missing)
missing.sort()
initial = [{'person': p, 'role': None} for p in missing]
new_exp = datetime.date.today() + datetime.timedelta(days=365)
if request.method == 'POST':
formset = InstrRoleFormSet(request.POST, initial=initial)
if formset.is_valid():
count = 0
for f in formset.forms:
p = f.cleaned_data['person']
r = f.cleaned_data['role']
if r == "NONE" or p not in missing:
continue
r = Role(person=p, role=r, unit=unit, expiry=new_exp)
r.save()
count += 1
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("new role: %s as %s") % (p.userid, r),
related_object=r)
l.save()
messages.success(request, 'Set instructor roles for %i people.' % (count))
return HttpResponseRedirect(reverse('admin:unit_admin'))
else:
formset = InstrRoleFormSet(initial=initial)
context = {'formset': formset, 'unit': unit}
return render(request, 'coredata/missing_instructors.html', context)
# AJAX/JSON for course offering selector autocomplete
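# (returns a JSON list of {"value": <offering id>, "label": <offering label>} objects)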
def offerings_search(request):
if 'term' not in request.GET:
return ForbiddenResponse(request, "Must provide 'term' query.")
term = request.GET['term']
response = HttpResponse(content_type='application/json')
data = []
query = get_query(term, ['subject', 'number', 'section', 'semester__name', 'title'])
offerings = CourseOffering.objects.filter(query).exclude(component="CAN").select_related('semester')
for o in offerings:
label = o.search_label_value()
d = {'value': o.id, 'label': label}
data.append(d)
json.dump(data, response, indent=1)
return response
# AJAX/JSON for course offering selector autocomplete with slugs
def offerings_slug_search(request, semester=None):
if 'term' not in request.GET:
return ForbiddenResponse(request, "Must provide 'term' query.")
term = request.GET['term']
response = HttpResponse(content_type='application/json')
data = []
query = get_query(term, ['subject', 'number', 'section', 'semester__name', 'title'])
offerings = CourseOffering.objects.filter(query).exclude(component="CAN").select_related('semester')
if semester:
offerings = offerings.filter(semester__name=semester)
for o in offerings:
label = o.search_label_value()
d = {'value': o.slug, 'label': label}
data.append(d)
json.dump(data, response, indent=1)
return response
# AJAX/JSON for course selector autocomplete
def course_search(request):
if 'term' not in request.GET:
return ForbiddenResponse(request, "Must provide 'term' query.")
term = request.GET['term']
response = HttpResponse(content_type='application/json')
data = []
query = get_query(term, ['subject', 'number', 'title'])
courses = Course.objects.filter(query)
for c in courses:
label = "%s %s" % (c.subject, c.number)
d = {'value': c.id, 'label': label}
data.append(d)
json.dump(data, response, indent=1)
return response
# AJAX/JSON for student search autocomplete
EXCLUDE_EMPLIDS = set(['953022983']) # exclude these from autocomplete
# 953022983 is an inactive staff account and should not be assigned things
@login_required
def student_search(request):
# check permissions
roles = Role.all_roles(request.user.username)
allowed = set(['ADVS', 'ADMN', 'GRAD', 'FUND', 'SYSA', 'FACA', 'FDRE'])
if not(roles & allowed) and not has_formgroup(request) and not has_global_role('DISC', request):
# doesn't have any allowed roles
return ForbiddenResponse(request, "Not permitted to do student search.")
if 'term' not in request.GET:
return ForbiddenResponse(request, "Must provide 'term' query.")
term = request.GET['term']
response = HttpResponse(content_type='application/json')
# do the query with Haystack
# experimentally, score >= 1 seems to correspond to useful things
student_qs = SearchQuerySet().models(Person).filter(text_fuzzy=term)[:20]
data = [{'value': r.emplid, 'label': r.search_display} for r in student_qs
if r and r.score >= 1 and str(r.emplid) not in EXCLUDE_EMPLIDS]
# non-haystack version of the above query
if len(student_qs) == 0:
studentQuery = get_query(term, ['userid', 'emplid', 'first_name', 'last_name'])
students = Person.objects.filter(studentQuery)[:20]
data = [{'value': s.emplid, 'label': s.search_label_value()} for s in students if str(s.emplid) not in EXCLUDE_EMPLIDS]
if 'nonstudent' in request.GET and 'ADVS' in roles:
nonStudentQuery = get_query(term, ['first_name', 'last_name', 'pref_first_name'])
nonStudents = NonStudent.objects.filter(nonStudentQuery)[:10]
data.extend([{'value': n.slug, 'label': n.search_label_value()} for n in nonStudents])
#data.sort(key = lambda x: x['label'])
json.dump(data, response, indent=1)
return response
def offering_by_id(request):
if 'id' not in request.GET:
return ForbiddenResponse(request, "Must provide 'id' query.")
id_ = request.GET['id']
try:
int(id_)
except ValueError:
return ForbiddenResponse(request, "'id' must be an integer.")
offering = get_object_or_404(CourseOffering, pk=id_)
return HttpResponse(offering.search_label_value())
from coredata.queries import find_person
@login_required
def XXX_sims_person_search(request):
# check permissions
roles = Role.all_roles(request.user.username)
allowed = set(['ADVS', 'ADMN', 'GRAD', 'FUND'])
if not(roles & allowed):
# doesn't have any allowed roles
return ForbiddenResponse(request, "Not permitted to do person search.")
if 'emplid' not in request.GET:
return ForbiddenResponse(request, "Must provide 'emplid' query.")
emplid = request.GET['emplid']
response = HttpResponse(content_type='application/json')
data = find_person(emplid)
json.dump(data, response, indent=1)
return response
def browse_courses(request):
"""
Interactive CourseOffering browser
"""
if 'tabledata' in request.GET:
# table data
return _offering_data(request)
if 'instructor_autocomplete' in request.GET:
# instructor autocomplete search
return _instructor_autocomplete(request)
# actually displaying the page at this point
form = OfferingFilterForm()
context = {
'form': form,
}
return render(request, 'coredata/browse_courses.html', context)
from django_datatables_view.base_datatable_view import BaseDatatableView
from django.db.models import Q, F
from django.conf import settings
import operator
import pytz
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from courselib.auth import NotFoundResponse
from coredata.models import CAMPUSES_SHORT
from coredata.forms import OfferingFilterForm, FLAG_DICT
from coredata.queries import more_offering_info, outlines_data_json, SIMSProblem
from dashboard.views import _offerings_calendar_data
COLUMNS = ['semester', 'coursecode', 'title', 'enrol', 'instructors', 'campus']
COLUMN_ORDERING = { # column -> ordering info for datatable_view
'semester': 'semester__name',
'coursecode': ['subject', 'number', 'section'],
'title': 'title',
'instructors': [],
'enrol': 'enrl_tot',
'campus': 'campus',
}
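# 'instructors' maps to an empty list: it is rendered from a per-row computed
# string, so there are no database fields to order by and the column cannot be
# sorted server-side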
class OfferingDataJson(BaseDatatableView):
model = CourseOffering
max_display_length = 500
columns = COLUMNS
order_columns = [COLUMN_ORDERING[col] for col in columns]
def get_context_data(self, *args, **kwargs):
try:
return super().get_context_data(*args, **kwargs)
        except Exception:
raise Http404()
def render_column(self, offering, column):
if column == 'coursecode':
txt = '%s\u00a0%s\u00a0%s' % (offering.subject, offering.number, offering.section) # those are nbsps
url = reverse('browse:browse_courses_info', kwargs={'course_slug': offering.slug})
col = mark_safe('<a href="%s">%s</a>' % (url, conditional_escape(txt)))
elif column == 'instructors':
col = offering.instructors_printing_str()
elif column == 'campus':
col = CAMPUSES_SHORT[offering.campus]
elif column == 'enrol':
col = '%i/%i' % (offering.enrl_tot, offering.enrl_cap)
if offering.wait_tot:
col += ' (+%i)' % (offering.wait_tot,)
elif column == 'semester':
col = str(offering.semester).replace(' ', '\u00a0') # nbsp
elif hasattr(offering, 'get_%s_display' % column):
# it's a choice field
col = getattr(offering, 'get_%s_display' % column)()
else:
col = str(getattr(offering, column))
return conditional_escape(col)
def ordering(self, qs):
return super(OfferingDataJson, self).ordering(qs)
def filter_queryset(self, qs):
# use request parameters to filter queryset
GET = self.request.GET
# no cancelled courses
qs = qs.exclude(component='CAN')
# no courses outside the allowed semester range
qs = qs.filter(semester__in=OfferingFilterForm.allowed_semesters())
# no locally-merged courses
qs = qs.exclude(flags=CourseOffering.flags.combined)
srch = GET.get('search[value]', None)
if srch:
# non-haystack version:
#qs = qs.filter(Q(title__icontains=srch) | Q(number__icontains=srch) | Q(subject__icontains=srch) | Q(section__icontains=srch))
# get offering set from haystack, and use it to limit our query
offering_qs = SearchQuerySet().models(CourseOffering).filter(text__fuzzy=srch)[:500]
offering_pks = (r.pk for r in offering_qs if r is not None)
qs = qs.filter(pk__in=offering_pks)
subject = GET.get('subject[]', None)
if subject:
qs = qs.filter(subject=subject)
number = GET.get('number[]', None)
if number:
qs = qs.filter(number__istartswith=number)
section = GET.get('section[]', None)
if section:
qs = qs.filter(section__istartswith=section)
instructor = GET.get('instructor[]', None)
if instructor:
off_ids = Member.objects.order_by().filter(person__userid=instructor, role='INST').values_list('offering', flat=True)[:500]
#qs = qs.filter(id__in=off_ids)
# above should work, but production mySQL is ancient and can't do IN + LIMIT
if off_ids:
fake_in = reduce(operator.__or__, (Q(id=oid) for oid in off_ids))
qs = qs.filter(fake_in)
else:
qs = qs.none()
campus = GET.get('campus[]', None)
if campus:
qs = qs.filter(campus=campus)
semester = GET.get('semester[]', None)
if semester:
qs = qs.filter(semester__name=semester)
title = GET.get('crstitle[]', None)
if title:
# non-haystack version:
#qs = qs.filter(title__icontains=title)
# get offering set from haystack, and use it to limit our query
offering_qs = SearchQuerySet().models(CourseOffering).filter(title__fuzzy=title)[:500]
offering_pks = (r.pk for r in offering_qs if r is not None)
qs = qs.filter(pk__in=offering_pks)
wqb = GET.getlist('wqb[]')
for f in wqb:
if f not in FLAG_DICT:
continue # not in our list of flags: not safe to getattr
qs = qs.filter(flags=getattr(CourseOffering.flags, f))
mode = GET.get('mode[]', None)
if mode == 'dist':
qs = qs.filter(instr_mode='DE')
elif mode == 'on':
qs = qs.exclude(instr_mode='DE')
elif mode == 'day':
qs = qs.exclude(instr_mode='DE').exclude(section__startswith='E')
elif mode == 'eve':
qs = qs.exclude(instr_mode='DE').filter(section__startswith='E')
# free space filter
space_filters = GET.getlist('space[]')
if 'seats' in space_filters:
qs = qs.filter(enrl_tot__lt=F('enrl_cap'))
if 'nowait' in space_filters:
qs = qs.filter(wait_tot=0)
return qs
#def XXX_prepare_results(self, qs):
# "Prepare for mData-style data handling"
# data = []
# for item in qs:
# data.append(dict((column, self.render_column(item, column)) for column in self.get_columns()))
# return data
#def get_context_data(self, *args, **kwargs):
# data = super(OfferingDataJson, self).get_context_data(*args, **kwargs)
# data['colinfo'] = [(c, COLUMN_NAMES.get(c, '???')) for c in self.get_columns()]
# return data
_offering_data = OfferingDataJson.as_view()
def _instructor_autocomplete(request):
"""
Responses for the jQuery autocomplete for instructor search: key by userid not emplid for privacy
"""
if 'term' not in request.GET:
return ForbiddenResponse(request, "Must provide 'term' query.")
response = HttpResponse(content_type='application/json')
""" # non-haystack version
query = get_query(request.GET['term'], ['person__first_name', 'person__last_name', 'person__userid', 'person__middle_name'])
# matching person.id values who have actually taught a course
person_ids = Member.objects.filter(query).filter(role='INST') \
.exclude(person__userid=None).order_by() \
.values_list('person', flat=True).distinct()[:500]
person_ids = list(person_ids) # shouldn't be necessary, but production mySQL can't do IN + LIMIT
# get the Person objects: is there no way to do this in one query?
people = Person.objects.filter(id__in=person_ids)
"""
term = request.GET['term']
# strip any digits from the query, so users can't probe emplids with the search (emplid is the only digit-containing
# thing in the Person text index)
term = ''.join(c for c in term if not c.isdigit())
# query with haystack
person_qs = SearchQuerySet().models(Person).filter(text__fuzzy=term)[:100]
person_pks = (r.pk for r in person_qs if r is not None)
# go back to the database to limit to only instructors
instr_ids = Member.objects.filter(person_id__in=person_pks).filter(role='INST') \
.exclude(person__userid=None).order_by() \
.values_list('person', flat=True).distinct()[:20]
instr_ids = list(instr_ids) # shouldn't be necessary, but production mySQL can't do IN + LIMIT
people = Person.objects.filter(id__in=instr_ids)
data = [{'value': p.userid, 'label': p.name()} for p in people]
json.dump(data, response, indent=1)
return response
def browse_courses_info(request, course_slug):
"""
Browsing info about a single course offering.
"""
offering = get_object_or_404(CourseOffering, slug=course_slug)
if offering.flags.combined:
return NotFoundResponse(request)
if 'data' in request.GET:
# more_course_info data requested
response = HttpResponse(content_type='application/json')
try:
data = more_offering_info(offering, browse_data=True, offering_effdt=True)
except SIMSProblem as e:
data = {'error': str(e)}
json.dump(data, response, indent=1)
return response
elif 'caldata' in request.GET:
# calendar data requested
return _offering_meeting_time_data(request, offering)
elif 'outline' in request.GET:
# course outline data requested
response = HttpResponse(content_type='application/json')
data = outlines_data_json(offering)
response.write(data)
return response
# the page itself (with most data assembled by AJAX requests to the above)
context = {
'offering': offering,
}
return render(request, 'coredata/browse_courses_info.html', context)
def _offering_meeting_time_data(request, offering):
"""
fullcalendar.js data for this offering's events
"""
try:
st = iso8601.parse_date(request.GET['start'])
en = iso8601.parse_date(request.GET['end'])
except (KeyError, ValueError, iso8601.ParseError):
return NotFoundResponse(request, errormsg="Bad request")
local_tz = pytz.timezone(settings.TIME_ZONE)
start = st - datetime.timedelta(days=1)
end = en + datetime.timedelta(days=1)
response = HttpResponse(content_type='application/json')
data = list(_offerings_calendar_data([offering], None, start, end, local_tz,
dt_string=True, colour=True, browse_titles=True))
json.dump(data, response, indent=1)
return response
@requires_role("ADMN")
def new_temporary_person(request):
if request.method == 'POST':
form = TemporaryPersonForm(request.POST)
if form.is_valid():
p = Person( first_name = form.cleaned_data['first_name'],
last_name = form.cleaned_data['last_name'],
emplid = Person.next_available_temp_emplid(),
userid = Person.next_available_temp_userid(),
temporary = True)
if form.cleaned_data['email']:
p.config['email'] = form.cleaned_data['email']
if form.cleaned_data['sin']:
p.config['sin'] = form.cleaned_data['sin']
p.save()
messages.success(request, 'Added new temporary person %s' % (p,))
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("new temporary person: %s") % (p,),
related_object=p)
l.save()
return HttpResponseRedirect(reverse('admin:unit_admin'))
else:
form = TemporaryPersonForm()
return render(request, 'coredata/new_temporary_person.html', {'form': form})
@cached(3600*12)
def _has_homepages(unit_id, semester_id):
offerings = CourseOffering.objects.filter(owner_id=unit_id, semester_id=semester_id, graded=True) \
.exclude(component='CAN') \
.exclude(instr_mode__in=['CO', 'GI']) \
.filter(config__contains='"url"')
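    # config__contains is only a textual prefilter on the serialized config blob;
    # the comprehension below confirms the 'url' key is actually present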
offerings = [o for o in offerings if 'url' in o.config]
return bool(offerings)
def course_home_pages(request):
semester = Semester.current()
units = Unit.objects.all().order_by('label')
units = [u for u in units if _has_homepages(u.id, semester.id)]
context = {
'semester': semester,
'units': units,
}
return render(request, "coredata/course_home_pages.html", context)
def course_home_pages_unit(request, unit_slug, semester=None):
if semester:
semester = get_object_or_404(Semester, name=semester)
else:
semester = Semester.current()
unit = get_object_or_404(Unit, slug=unit_slug)
offerings = CourseOffering.objects.filter(semester=semester, owner=unit, graded=True) \
.exclude(component='CAN') \
.exclude(instr_mode__in=['CO', 'GI'])
if request.user.is_authenticated:
is_admin = Role.objects_fresh.filter(unit=unit, person__userid=request.user.username, role='ADMN').exists()
else:
is_admin = False
context = {
'semester': semester,
'unit': unit,
'offerings': offerings,
'is_admin': is_admin,
}
return render(request, "coredata/course_home_pages_unit.html", context)
@requires_role('ADMN')
def course_home_admin(request, course_slug):
offering = get_object_or_404(CourseOffering, slug=course_slug, owner__in=request.units)
if request.method == 'POST':
form = CourseHomePageForm(data=request.POST)
if form.is_valid():
offering.set_url(form.cleaned_data['url'])
if 'maillist' in form.cleaned_data and form.cleaned_data['maillist']:
offering.set_maillist(form.cleaned_data['maillist'])
offering.save()
messages.success(request, 'Updated URL for %s.' % (offering.name()))
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("Updated course URL for %s.") % (offering.name()),
related_object=offering)
l.save()
return HttpResponseRedirect(reverse('browse:course_home_pages_unit', kwargs={'unit_slug': offering.owner.slug, 'semester': offering.semester.name}))
else:
form = CourseHomePageForm(initial={'url': offering.url(), 'maillist': offering.maillist()})
context = {
'offering': offering,
'form': form,
}
return render(request, "coredata/course_home_admin.html", context)
| sfu-fas/coursys | coredata/views.py | Python | gpl-3.0 | 61,526 | ["VisIt"] | dc1136462a1f68c3f9b3ee6b2b24a921543f0689a8f40ae53e8254c815320205 |
#! /usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-get-site-protocols
# Author : Stuart Paterson
########################################################################
"""
Check the defined protocols for all SEs of a given site
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile]' % Script.scriptName ] ) )
Script.registerSwitch( "S:", "Site=", "Site for which protocols are to be checked (mandatory)" )
Script.parseCommandLine( ignoreErrors = True )
site = None
for switch in Script.getUnprocessedSwitches():
if switch[0] == "Site" or switch[0] == "S":
site = switch[1]
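# Example invocation (the site name is a placeholder):
#   dirac-admin-get-site-protocols --Site=LCG.Example.org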
from DIRAC import exit as DIRACExit
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
if not site:
Script.showHelp()
diracAdmin = DiracAdmin()
exitCode = 0
result = diracAdmin.getSiteProtocols( site, printOutput = True )
if not result['OK']:
  print('ERROR: %s' % result['Message'])
exitCode = 2
DIRACExit( exitCode )
| Sbalbp/DIRAC | Interfaces/scripts/dirac-admin-get-site-protocols.py | Python | gpl-3.0 | 1,196 | ["DIRAC"] | e1b3a54e6ede15033b50ba43ef1c9d3c857bffed83f47f5c111643aadfaff53c |
# -*- coding: utf-8 -*-
import pytest
import inferbeddings.parse.clauses as clauses
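# Clause syntax exercised by the tests below: 'head :- body' with comma-separated
# body atoms; an optional weight in angle brackets ('< 1.2 >', or '< ? >' for a
# learnable weight); uppercase arguments are variables, lowercase are constants,
# and predicate names may be quoted.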
@pytest.mark.light
def test_parse_clauses_one():
clause_str = 'p(x, y) :- p(x, z), q(z, a), r(a, y)'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert isinstance(clause, clauses.Clause)
assert isinstance(clause.head, clauses.Atom)
assert isinstance(clause.body, tuple)
assert isinstance(clause.head.predicate, clauses.Predicate)
assert isinstance(clause.head.arguments, tuple)
assert isinstance(clause.head.negated, bool)
assert clause.weight == 1.0
@pytest.mark.light
def test_parse_atom_clause():
clause_str = 'p(X, y)'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert isinstance(clause, clauses.Clause)
assert isinstance(clause.head, clauses.Atom)
assert len(clause.body) == 0
assert clause.head.predicate.name == "p"
assert isinstance(clause.head.arguments[0], clauses.Variable)
assert isinstance(clause.head.arguments[1], clauses.Constant)
assert clause.head.arguments[1].name == "y"
assert clause.weight == 1.0
@pytest.mark.light
def test_parse_weighted_atom_clause():
clause_str = 'p(X, y) < -1.2 >'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert clause.weight == -1.2
@pytest.mark.light
def test_parse_weighted_arity_2_clause():
clause_str = 'p(X, y) :- r(X,Z), q(X) < 1.2 >'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert clause.weight == 1.2
@pytest.mark.light
def test_parse_learnable_weight_arity_2_clause():
clause_str = 'p(X, y) :- r(X,Z), q(X) < ? >'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert clause.weight is None
@pytest.mark.light
def test_parse_learnable_weight_atom_clause():
clause_str = 'p(X, y) < ? >'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert clause.weight is None
@pytest.mark.light
def test_parse_clauses_two():
clause_str = '"P"(x, y) :- p(x, z), q(z, a), "R"(a, y)'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert isinstance(clause, clauses.Clause)
assert isinstance(clause.head, clauses.Atom)
assert isinstance(clause.head.predicate.name, str)
assert isinstance(clause.body, tuple)
assert isinstance(clause.head.predicate, clauses.Predicate)
assert isinstance(clause.head.arguments, tuple)
assert isinstance(clause.head.negated, bool)
assert clause.weight == 1.0
if __name__ == '__main__':
pytest.main([__file__])
| uclmr/inferbeddings | tests/inferbeddings/parse/test_parsers.py | Python | mit | 2,806 | ["VisIt"] | f3ea066f27541dc61fd95e1d8a54f2630a23022e705cf89e5da393743c5c00e2 |
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
from pymatgen.optimization.linear_assignment import LinearAssignment
import numpy as np
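# LinearAssignment solves the linear assignment problem for a cost matrix:
# .min_cost is the minimal total cost of assigning each row to a distinct column
# and .solution records the chosen column per row; rectangular inputs need
# rows <= columns (the transposed case raises ValueError in test_rectangular).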
class LinearAssignmentTest(unittest.TestCase):
def test(self):
w0 = np.array([[19, 95, 9, 43, 62, 90, 10, 77, 71, 27],
[26, 30, 88, 78, 87, 2, 14, 71, 78, 11],
[48, 70, 26, 82, 32, 16, 36, 26, 42, 79],
[47, 46, 93, 66, 38, 20, 73, 39, 55, 51],
[ 1, 81, 31, 49, 20, 24, 95, 80, 82, 11],
[81, 48, 35, 54, 35, 55, 27, 87, 96, 7],
[42, 17, 60, 73, 37, 36, 79, 3, 60, 82],
[14, 57, 23, 69, 93, 78, 56, 49, 83, 36],
[11, 37, 24, 70, 62, 35, 64, 18, 99, 20],
[73, 11, 98, 50, 19, 96, 61, 73, 98, 14]])
w1 = np.array([[95, 60, 89, 38, 36, 38, 58, 94, 66, 23],
[37, 0, 40, 58, 97, 85, 18, 54, 86, 21],
[ 9, 74, 11, 45, 65, 64, 27, 88, 24, 26],
[58, 90, 6, 36, 17, 21, 2, 12, 80, 90],
[33, 0, 74, 75, 11, 84, 34, 7, 39, 0],
[17, 61, 94, 68, 27, 41, 33, 86, 59, 2],
[61, 94, 36, 53, 66, 33, 15, 87, 97, 11],
[22, 20, 57, 69, 15, 9, 15, 8, 82, 68],
[40, 0, 13, 61, 67, 40, 29, 25, 72, 44],
[13, 97, 97, 54, 5, 30, 44, 75, 16, 0]])
w2 = np.array([[34, 44, 72, 13, 10, 58, 16, 1, 10, 61],
[54, 70, 99, 4, 64, 0, 15, 94, 39, 46],
[49, 21, 80, 68, 96, 58, 24, 87, 79, 67],
[86, 46, 58, 83, 83, 56, 83, 65, 4, 96],
[48, 95, 64, 34, 75, 82, 64, 47, 35, 19],
[11, 49, 6, 57, 80, 26, 47, 63, 75, 75],
[74, 7, 15, 83, 64, 26, 78, 17, 67, 46],
[19, 13, 2, 26, 52, 16, 65, 24, 2, 98],
[36, 7, 93, 93, 11, 39, 94, 26, 46, 69],
[32, 95, 37, 50, 97, 96, 12, 70, 40, 93]])
la0 = LinearAssignment(w0)
self.assertEqual(la0.min_cost, 194, 'Incorrect cost')
la1 = LinearAssignment(w1)
self.assertEqual(la0.min_cost, la0.min_cost, 'Property incorrect')
self.assertEqual(la1.min_cost, 125, 'Incorrect cost')
la2 = LinearAssignment(w2)
self.assertEqual(la2.min_cost, 110, 'Incorrect cost')
def test_rectangular(self):
w0 = np.array([[19, 95, 9, 43, 62, 90, 10, 77, 71, 27],
[26, 30, 88, 78, 87, 2, 14, 71, 78, 11],
[48, 70, 26, 82, 32, 16, 36, 26, 42, 79],
[47, 46, 93, 66, 38, 20, 73, 39, 55, 51],
[ 1, 81, 31, 49, 20, 24, 95, 80, 82, 11],
[81, 48, 35, 54, 35, 55, 27, 87, 96, 7],
[42, 17, 60, 73, 37, 36, 79, 3, 60, 82],
[14, 57, 23, 69, 93, 78, 56, 49, 83, 36],
[11, 37, 24, 70, 62, 35, 64, 18, 99, 20]])
la0 = LinearAssignment(w0)
w1 = np.array([[19, 95, 9, 43, 62, 90, 10, 77, 71, 27],
[26, 30, 88, 78, 87, 2, 14, 71, 78, 11],
[48, 70, 26, 82, 32, 16, 36, 26, 42, 79],
[47, 46, 93, 66, 38, 20, 73, 39, 55, 51],
[ 1, 81, 31, 49, 20, 24, 95, 80, 82, 11],
[81, 48, 35, 54, 35, 55, 27, 87, 96, 7],
[42, 17, 60, 73, 37, 36, 79, 3, 60, 82],
[14, 57, 23, 69, 93, 78, 56, 49, 83, 36],
[11, 37, 24, 70, 62, 35, 64, 18, 99, 20],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,]])
la1 = LinearAssignment(w1)
self.assertEqual(len(la1.solution), 10)
self.assertEqual(la0.min_cost, la1.min_cost)
self.assertRaises(ValueError, LinearAssignment, w0.T)
# NOTE: not collected by unittest because the name lacks the "test_" prefix.
def another_test_case(self):
w1 = np.array([[0.03900238875468465, 0.003202415721817453, 0.20107156847937024, 0.0, 0.5002116398420846,
0.11951326861160616, 0.0, 0.5469032363997579, 0.3243791041219123, 0.1119882291981289],
[0.6048342640688928, 0.3847629088356139, 0.0, 0.44358269535118944, 0.45925670625165016,
0.31416882324798145, 0.8065128182180494, 0.0, 0.26153475286065075, 0.6862799559241944],
[0.5597215814025246, 0.15133664165478322, 0.0, 0.6218101659263295, 0.15438455134183793,
0.17281467064043232, 0.8458127968475472, 0.020860721537078075, 0.1926886361228456, 0.0],
[0.0, 0.0, 0.6351848838666995, 0.21261247074659906, 0.4811603832432241, 0.6663733668270337,
0.63970145187428, 0.1415815172623256, 0.5294574133825874, 0.5576702829768786],
[0.25052904388309016, 0.2309392544588127, 0.0656162006684271, 0.0248922362001176, 0.0,
0.2101808638720748, 0.6529031699724193, 0.1503003886507902, 0.375576165698992,
0.7368328849560374],
[0.0, 0.042215873587668984, 0.10326920761908365, 0.3562551151517992, 0.9170343984958856,
0.818783531026254, 0.7896770426052844, 0.0, 0.6573135097946438, 0.17806189728574429],
[0.44992199118890386, 0.0, 0.38548898339412585, 0.6269193883601244, 1.0022861602564634, 0.0,
0.1869765500803764, 0.03474156273982543, 0.3715310534696664, 0.6197122486230232],
[0.37939853696836545, 0.2421427374018027, 0.5586150342727723, 0.0, 0.7171485794073893,
0.8021029235865014, 0.11213464903613135, 0.6497896761660467, 0.3274108706187846, 0.0],
[0.6674685746225324, 0.5347953626128863, 0.11461835366075113, 0.0, 0.8170639855163434,
0.7291931505979982, 0.3149153087053108, 0.1008681103294512, 0.0, 0.18751172321112997],
[0.6985944652913342, 0.6139921045056471, 0.0, 0.4393266955771965, 0.0, 0.47265399761400695,
0.3674241844351025, 0.04731761392352629, 0.21484886069716147, 0.16488710920126137]])
la = LinearAssignment(w1)
self.assertAlmostEqual(la.min_cost, 0)
def test_small_range(self):
# can be tricky for the augment step
x = np.array([[4, 5, 5, 6, 8, 4, 7, 4, 7, 8],
[5, 6, 6, 6, 7, 6, 6, 5, 6, 7],
[4, 4, 5, 7, 7, 4, 8, 4, 7, 7],
[6, 7, 6, 6, 7, 6, 6, 6, 6, 6],
[4, 4, 4, 6, 6, 4, 7, 4, 7, 7],
[4, 5, 5, 6, 8, 4, 7, 4, 7, 8],
[5, 7, 5, 5, 5, 6, 4, 5, 4, 6],
[8, 9, 8, 4, 5, 9, 4, 8, 4, 4],
[5, 6, 6, 6, 7, 6, 6, 5, 6, 7],
[5, 6, 6, 6, 7, 6, 6, 5, 6, 7]])
self.assertAlmostEqual(LinearAssignment(x).min_cost, 48)
def test_boolean_inputs(self):
w = np.ones((135, 135), dtype=bool)  # np.bool is removed in modern NumPy
np.fill_diagonal(w, False)
la = LinearAssignment(w)
#if the input doesn't get converted to a float, the masking
#doesn't work properly
self.assertEqual(la.orig_c.dtype, np.float64)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
sonium0/pymatgen
|
pymatgen/optimization/tests/test_linear_assignment.py
|
Python
|
mit
| 7,666
|
[
"pymatgen"
] |
86eabb3de54dfbf92f00e890abf8d1242cac6058d5b552bd34fbf38242f2e1bc
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012-2013 Michal Kalewski <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
This package provides a collection of process failure models.
A *process failure* occurs whenever the process does not behave according to
its algorithm, and here the term *process* means the *application* running on
one of the nodes in the simulated network. To simulate such behaviors, process
failure models are used, and they differ in the nature and scope of faults.
Possible process failures may include ([CGR11]_): **crashes** (where a process
at some point simply stops executing any steps and never recovers);
**omissions** (where a process does not send or receive messages that it is
supposed to send or receive according to its algorithm); **crashes with
recoveries** (where a process may crash and later recover, and is considered
faulty if it never recovers or keeps crashing and recovering infinitely
often); **eavesdropping** (where a process leaks information obtained in its
algorithm to an outside entity); and **arbitrary** (where a process may
deviate in any conceivable way from its algorithm).
.. [CGR11] Christian Cachin, Rachid Guerraoui, Luís Rodrigues. Introduction
to Reliable and Secure Distributed Programming, 2nd Edition.
Springer-Verlag, 2011.
"""
__docformat__ = 'reStructuredText'
__all__ = ['crash']
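# Usage sketch (editor's addition): the only model listed in __all__ above is
# the crash failure model, shipped as the `crash` submodule, e.g.:
#
#     from sim2net.failure import crash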
|
mkalewski/sim2net
|
sim2net/failure/__init__.py
|
Python
|
mit
| 1,788
|
[
"VisIt"
] |
a1f62c261b7bd572a4a9c6f4e1aea75aad5737c71f162d53dc36e7f01cfc4c22
|
import numpy as np
from scipy.ndimage import gaussian_filter1d
from tensortools.cpwarp.shifted_cp import ShiftedCP
from tensortools.cpwarp.multishift import MultiShiftModel
def simulate_shifted_cp(
shape, rank, max_shift=.5, smoothness=2.0, noise_scale=.1, seed=None):
"""
Generates a synthetic dataset from a shifted decomposition.
Parameters
----------
shape : tuple
Tuple of three integers specifying num_trials, num_timepoints,
num_units.
rank : int
Number of components in the ground-truth decomposition.
max_shift : float
Largest allowable shift expressed as a fraction of trial length.
smoothness : float
Specifies width of gaussian smoothing kernel applied to ground
truth model along the temporal dimension.
noise_scale : float
Standard deviation of additive Gaussian noise.
seed : RandomState, int, or None
Seeds random number generator.
Returns
-------
X : ndarray
Tensor of simulated data (num_trials x num_timepoints x num_units).
true_model : ShiftedCP
Object holding the true factors.
"""
rs = np.random.RandomState(seed)
factors = [
rs.rand(rank, shape[0]),
rs.exponential(1.0, size=(rank, shape[1])),
rs.rand(rank, shape[2]),
]
# factors[0] *= (factors[0] > np.percentile(factors[1], 50))
# factors[2] *= (factors[2] > np.percentile(factors[1], 50))
factors[1] *= (factors[1] > np.percentile(factors[1], 90))
factors[1] = gaussian_filter1d(factors[1], smoothness, axis=-1)
b = max_shift * shape[1]
shifts = rs.uniform(-b, b, size=(rank, shape[0]))
true_model = ShiftedCP(factors, shifts)
true_model.rebalance()
X = true_model.predict()
X += rs.randn(*shape) * noise_scale
# X = np.maximum(0.0, X)
return X, true_model
def simulate_multishift(
shape, rank, max_shift=.5, trial_factor_sparsity=.5,
smoothness=2.0, noise_scale=.1, seed=None):
"""
Generates a synthetic dataset from a multi-warp model.
Parameters
----------
shape : tuple
Tuple of three integers specifying num_trials, num_timepoints,
num_units.
rank : int
Number of components in the ground-truth model.
max_shift : float
Largest allowable shift expressed as a fraction of trial length.
trial_factor_sparsity : float
Dirichlet distribution parameter; smaller values correspond to
sparser (more nearly one-hot) loadings on the trial factors.
smoothness : float
Specifies width of gaussian smoothing kernel applied to ground
truth model along the temporal dimension.
noise_scale : float
Standard deviation of additive Gaussian noise.
seed : RandomState, int, or None
Seeds random number generator.
Returns
-------
X : ndarray
Tensor of simulated data (num_trials x num_timepoints x num_units).
true_model : MultiShiftModel
Object holding the true model.
"""
K, T, N = shape
rs = np.random.RandomState(seed)
_tmp = rs.exponential(1.0, size=(rank, T, N))
_tmp *= (_tmp > np.percentile(_tmp, 95))
templates = gaussian_filter1d(_tmp, smoothness, axis=1)
# use the seeded RandomState (not the global np.random) for reproducibility
trial_factors = rs.dirichlet(
[trial_factor_sparsity for _ in range(rank)], size=K).T
shifts = rs.uniform(
-max_shift * T, max_shift * T, size=(rank, K))
true_model = MultiShiftModel(
templates, trial_factors, shifts=shifts, periodic=True)
X = true_model.predict()
X += rs.randn(*shape) * noise_scale
return X, true_model
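if __name__ == "__main__":
# Smoke test (editor's addition): exercise both simulators defined above,
# relying only on the shapes documented in their docstrings.
X, model = simulate_shifted_cp((20, 50, 10), rank=3, seed=0)
assert X.shape == (20, 50, 10)
X2, model2 = simulate_multishift((20, 50, 10), rank=3, seed=0)
assert X2.shape == (20, 50, 10)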
|
ahwillia/tensortools
|
tensortools/cpwarp/datasets.py
|
Python
|
mit
| 3,512
|
[
"Gaussian"
] |
f97f5ff3da1f37ec5c7f3870725ed4836c244896bdecbc4cfe82f0b9b9c52a6e
|
"""
Example of plotting a 3D vector field
"""
# set up some data to plot
from numpy import *
dim = 10
# initialise the positions of the vectors
x = zeros((dim, dim), dtype=float)  # `floating` is abstract; use a concrete dtype
y = zeros((dim, dim), dtype=float)
z = zeros((dim, dim), dtype=float)
# initialise the vector displacements
# (I may need to rethink how this works in the interface)
dx = zeros((dim, dim), dtype=float)
dy = zeros((dim, dim), dtype=float)
dz = zeros((dim, dim), dtype=float)
# set the positions randomly, and set the displacements to smaller random
# numbers with mean zero (rather than uniform on [0, 1])
import random
random.seed()
for i in range(dim):
for j in range(dim):
x[i,j] = random.random()
y[i,j] = random.random()
z[i,j] = random.random()
dx[i,j] = (random.random()-0.5)/5.0
dy[i,j] = (random.random()-0.5)/5.0
dz[i,j] = (random.random()-0.5)/5.0
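# NOTE (editor's addition): the arrays generated above sketch the intended
# pyvisi interface but are not used by the raw vtk code below, which reads
# its vectors from a .vtk file instead.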
#### original vtk code
import vtk
# loading a vtk file as input
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName("../../vel-0004.vtk")
reader.Update()
grid = reader.GetOutput()
# grab the model centre and bounds
centre = grid.GetCenter()
bounds = grid.GetBounds()
# grab the norm of the vectors
norm = vtk.vtkVectorNorm()
norm.SetInput(grid)
maxNorm = grid.GetPointData().GetVectors().GetMaxNorm()
# to make arrow glyphs need an arrow source
arrow = vtk.vtkArrowSource()
# the arrows are 3D glyphs so set that up now
glyph = vtk.vtkGlyph3D()
glyph.ScalingOn()
glyph.SetScaleModeToScaleByScalar()
glyph.SetColorModeToColorByScalar()
glyph.SetVectorModeToUseVector()
glyph.SetScaleFactor(0.1/maxNorm)
glyph.SetInput(norm.GetOutput())
glyph.SetSource(arrow.GetOutput())
glyph.ClampingOff()
# set up a stripper to speed up rendering
stripper = vtk.vtkStripper()
stripper.SetInput(glyph.GetOutput())
# make a lookup table for the colour map and invert it (colours look
# better when it's inverted)
lut = vtk.vtkLookupTable()
refLut = vtk.vtkLookupTable()
lut.Build()
refLut.Build()
for j in range(256):
lut.SetTableValue(j, refLut.GetTableValue(255-j))
# set up the mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(stripper.GetOutput())
mapper.SetScalarRange(0,maxNorm)
mapper.SetLookupTable(lut)
# set up the actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# set up the text properties for nice text
font_size = 20
textProp = vtk.vtkTextProperty()
textProp.SetFontSize(font_size)
textProp.SetFontFamilyToArial()
textProp.BoldOff()
textProp.ItalicOff()
textProp.ShadowOff()
textProp.SetColor(0.0, 0.0, 0.0)
# make a title
title = vtk.vtkTextMapper()
title.SetInput("Example 3D arrow/quiver/vector field plot")
# make the title text use the text properties
titleProp = title.GetTextProperty()
titleProp.ShallowCopy(textProp)
titleProp.SetJustificationToCentered()
titleProp.SetVerticalJustificationToTop()
titleProp.BoldOn()
# make the actor for the title
titleActor = vtk.vtkTextActor()
titleActor.SetMapper(title)
titleActor.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
titleActor.GetPositionCoordinate().SetValue(0.5, 0.95)
# put an outline around the data
outline = vtk.vtkOutlineSource()
outline.SetBounds(bounds)
# make its mapper
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInput(outline.GetOutput())
# make its actor
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0,0,0)
# set up the renderer and render window
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetSize(800,600)
renWin.AddRenderer(ren)
ren.SetBackground(1,1,1)
# add the relevant actors
ren.AddActor(actor)
ren.AddActor(titleActor)
ren.AddActor(outlineActor)
cam = ren.GetActiveCamera()
#cam.Azimuth(0)
#cam.Elevation(-90)
cam.Zoom(1.2)
ren.SetActiveCamera(cam)
ren.ResetCameraClippingRange()
# add some axes
axes = vtk.vtkCubeAxesActor2D()
axes.SetInput(grid)
axes.SetCamera(ren.GetActiveCamera())
axes.SetLabelFormat("%6.4g")
axes.SetFlyModeToOuterEdges()
axes.SetFontFactor(0.8)
axes.SetAxisTitleTextProperty(textProp)
axes.SetAxisLabelTextProperty(textProp)
axes.SetXLabel("x")
axes.SetYLabel("y")
axes.SetZLabel("z")
axes.SetNumberOfLabels(5)
axes.GetProperty().SetColor(0,0,0)
ren.AddProp(axes)
# set up stuff for interactive viewing
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
renWin.Render()
iren.Start()
# the WindowToImageFilter is what one uses to save the window to an
# image file
win2img = vtk.vtkWindowToImageFilter()
win2img.SetInput(renWin)
# set up the PNGWriter as we're saving to png
writer = vtk.vtkPNGWriter()
writer.SetFileName("arrowPlot3D.png")
writer.SetInput(win2img.GetOutput())
writer.Write()
# vim: expandtab shiftwidth=4:
|
paultcochrane/pyvisi
|
examples/renderers/vtk/arrowPlot3D.py
|
Python
|
gpl-2.0
| 4,745
|
[
"VTK"
] |
e42d0c13ab7bb044b2bb98fc384b1c74f5a94cddf9b195d781fff9a809332cdd
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A source for reading from VCF files (version 4.x).
The 4.2 spec is available at https://samtools.github.io/hts-specs/VCFv4.2.pdf.
"""
from typing import Any, Iterable, List, Tuple # pylint: disable=unused-import
from functools import partial
import apache_beam as beam
from apache_beam.coders import coders
from apache_beam.io import filebasedsource
from apache_beam.io import filesystems
from apache_beam.io import range_trackers # pylint: disable=unused-import
from apache_beam.io import textio
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.transforms import PTransform
from gcp_variant_transforms.beam_io import bgzf_io
from gcp_variant_transforms.beam_io import vcf_parser
# Other modules depend on vcfio for the following constant values, so they are
# re-declared here to preserve the existing import paths.
MalformedVcfRecord = vcf_parser.MalformedVcfRecord
MISSING_FIELD_VALUE = vcf_parser.MISSING_FIELD_VALUE
PASS_FILTER = vcf_parser.PASS_FILTER
END_INFO_KEY = vcf_parser.END_INFO_KEY
GENOTYPE_FORMAT_KEY = vcf_parser.GENOTYPE_FORMAT_KEY
PHASESET_FORMAT_KEY = vcf_parser.PHASESET_FORMAT_KEY
DEFAULT_PHASESET_VALUE = vcf_parser.DEFAULT_PHASESET_VALUE
MISSING_GENOTYPE_VALUE = vcf_parser.MISSING_GENOTYPE_VALUE
Variant = vcf_parser.Variant
VariantCall = vcf_parser.VariantCall
SampleNameEncoding = vcf_parser.SampleNameEncoding
class _ToVcfRecordCoder(coders.Coder):
"""Coder for encoding :class:`Variant` objects as VCF text lines."""
def __init__(self, bq_uses_1_based_coordinate):
# type: (bool) -> None
"""Initialize _ToVcfRecordCoder PTransform.
Args:
bq_uses_1_based_coordinate: specify whether the coordinates used in BQ
are 1-based (default) or 0-based. To find out, examine the
start_position column description.
"""
self.bq_uses_1_based_coordinate = bq_uses_1_based_coordinate
def encode(self, variant):
# type: (Variant) -> bytes
"""Converts a :class:`Variant` object back to a VCF line."""
encoded_info = self._encode_variant_info(variant)
format_keys = self._get_variant_format_keys(variant)
encoded_calls = self._encode_variant_calls(variant, format_keys)
columns = [
variant.reference_name,
(None if variant.start is None
else (variant.start if self.bq_uses_1_based_coordinate
else variant.start + 1)),
';'.join(variant.names),
variant.reference_bases,
','.join(variant.alternate_bases),
variant.quality,
';'.join(variant.filters),
encoded_info,
':'.join(format_keys),
]
if encoded_calls:
columns.append(encoded_calls)
columns = [self._encode_value(c) for c in columns]
return ('\t'.join(columns) + '\n').encode('utf-8')
def _encode_value(self, value):
# type: (Any) -> str
"""Encodes a single `Variant` column value for a VCF file line."""
if not value and value != 0:
return MISSING_FIELD_VALUE
elif isinstance(value, list):
return ','.join([self._encode_value(x) for x in value])
return value.decode('utf-8') if isinstance(value, bytes) else str(value)
def _encode_variant_info(self, variant):
"""Encodes the info of a :class:`Variant` for a VCF file line."""
encoded_infos = []
start_0_based = (None if variant.start is None
else (variant.start - 1 if self.bq_uses_1_based_coordinate
else variant.start))
# Set END in info if it doesn't match len(reference_bases)+start in 0-based
# coordinate system. This is usually the case for non-variant regions.
if (variant.start is not None
and variant.reference_bases
and variant.end
and start_0_based + len(variant.reference_bases) != variant.end):
encoded_infos.append('END=%d' % variant.end)
# Set all other fields of info.
for k, v in variant.info.items():
if v is True:
encoded_infos.append(k)
else:
encoded_infos.append('%s=%s' % (str(k), self._encode_value(v)))
return ';'.join(encoded_infos)
def _get_variant_format_keys(self, variant):
"""Gets the format keys of a :class:`Variant`."""
if not variant.calls:
return []
format_keys = [GENOTYPE_FORMAT_KEY]
for call in variant.calls:
# If any calls have a set phaseset that is not `DEFAULT_PHASESET_VALUE`,
# the key will be added to the format field.
if self._is_alternate_phaseset(call.phaseset):
format_keys.append(PHASESET_FORMAT_KEY)
format_keys.extend(list(k for k in call.info))
# Sort all keys and remove duplicates after GENOTYPE_FORMAT_KEY
format_keys[1:] = sorted(list(set(format_keys[1:])))
return format_keys
def _encode_variant_calls(self, variant, format_keys):
# type: (Variant, List[str]) -> str
"""Encodes the calls of `Variant` in a VCF line."""
# Ensure that genotype is always the first key in format_keys
assert not format_keys or format_keys[0] == GENOTYPE_FORMAT_KEY
encoded_calls = []
for call in variant.calls:
encoded_call_info = [self._encode_genotype(call.genotype, call.phaseset)]
for key in format_keys[1:]:
if key == PHASESET_FORMAT_KEY:
encoded_call_info.append(
self._encode_phaseset(call.phaseset))
else:
encoded_call_info.append(
self._encode_call_info_value(call.info, key))
encoded_calls.append(':'.join(encoded_call_info))
return '\t'.join(encoded_calls)
def _encode_genotype(self, genotype, phaseset):
"""Encodes the genotype of a :class:`VariantCall` for a VCF file line."""
if genotype == MISSING_GENOTYPE_VALUE:
return MISSING_FIELD_VALUE
encoded_genotype = []
for allele in genotype:
if allele == MISSING_GENOTYPE_VALUE:
encoded_genotype.append(MISSING_FIELD_VALUE)
else:
encoded_genotype.append(self._encode_value(allele))
phase_char = '|' if phaseset else '/'
return phase_char.join(encoded_genotype) or MISSING_FIELD_VALUE
def _encode_phaseset(self, phaseset):
"""Encodes the phaseset of a :class:`VariantCall` for a VCF file line."""
if self._is_alternate_phaseset(phaseset):
return phaseset
return MISSING_FIELD_VALUE
def _is_alternate_phaseset(self, phaseset):
return phaseset and phaseset != DEFAULT_PHASESET_VALUE
def _encode_call_info_value(self, info, key):
"""Encodes the info of a :class:`VariantCall` for a VCF file line."""
if key in info:
return self._encode_value(info[key])
return MISSING_FIELD_VALUE
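# Encoding sketch (editor's addition): `encode` joins the standard VCF columns
# (CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT, then one column per
# call) with tabs and appends a newline, so one Variant becomes exactly one
# VCF data line.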
class _VcfSource(filebasedsource.FileBasedSource):
"""A source for reading VCF files.
Parses VCF files (version 4) using the PySam library. If file_pattern specifies
multiple files, then the header from each file is used separately to parse
the content. However, the output will be a uniform PCollection of
:class:`Variant` objects.
"""
DEFAULT_VCF_READ_BUFFER_SIZE = 65536 # 64kB
def __init__(
self,
file_pattern, # type: str
representative_header_lines=None, # type: List[str]
compression_type=CompressionTypes.AUTO, # type: str
buffer_size=DEFAULT_VCF_READ_BUFFER_SIZE, # type: int
validate=True, # type: bool
allow_malformed_records=False, # type: bool
pre_infer_headers=False, # type: bool
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH, # type: int
use_1_based_coordinate=False, # type: bool
move_hom_ref_calls=False # type: bool
):
# type: (...) -> None
super().__init__(file_pattern,
compression_type=compression_type,
validate=validate)
self._representative_header_lines = representative_header_lines
self._compression_type = compression_type
self._buffer_size = buffer_size
self._allow_malformed_records = allow_malformed_records
self._pre_infer_headers = pre_infer_headers
self._sample_name_encoding = sample_name_encoding
self._use_1_based_coordinate = use_1_based_coordinate
self._move_hom_ref_calls = move_hom_ref_calls
def read_records(self,
file_name, # type: str
range_tracker # type: range_trackers.OffsetRangeTracker
):
# type: (...) -> Iterable[MalformedVcfRecord]
record_iterator = vcf_parser.PySamParser(
file_name,
range_tracker,
self._compression_type,
self._allow_malformed_records,
file_pattern=self._pattern,
representative_header_lines=self._representative_header_lines,
pre_infer_headers=self._pre_infer_headers,
sample_name_encoding=self._sample_name_encoding,
use_1_based_coordinate=self._use_1_based_coordinate,
move_hom_ref_calls=self._move_hom_ref_calls,
buffer_size=self._buffer_size,
skip_header_lines=0)
# Convert iterator to generator to abstract behavior
for record in record_iterator:
yield record
class ReadFromBGZF(beam.PTransform):
"""Reads variants from BGZF."""
def __init__(self,
input_files,
representative_header_lines,
allow_malformed_records,
pre_infer_headers,
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH,
use_1_based_coordinate=False,
move_hom_ref_calls=False
):
# type: (List[str], List[str], bool, bool, int, bool, bool) -> None
"""Initializes the transform.
Args:
input_files: The BGZF file paths to read from.
representative_header_lines: Header definitions to be used for parsing
VCF files.
allow_malformed_records: If true, malformed records from VCF files will be
returned as `MalformedVcfRecord` instead of failing the pipeline.
pre_infer_headers: If true, drop headers and make sure PySam returns the
exact data for variants and calls, without type matching.
sample_name_encoding: specify how we want to encode sample_name, mainly
to deal with the same sample_name being used across multiple VCF files.
use_1_based_coordinate: specify whether the coordinates should be stored
in BQ using 0-based exclusive (default) or 1-based inclusive
coordinates.
move_hom_ref_calls: If true, filter 0 GT data out of the call list and
add the call name to a hom_ref_calls column.
"""
self._input_files = input_files
self._representative_header_lines = representative_header_lines
self._allow_malformed_records = allow_malformed_records
self._pre_infer_headers = pre_infer_headers
self._sample_name_encoding = sample_name_encoding
self._use_1_based_coordinate = use_1_based_coordinate
self._move_hom_ref_calls = move_hom_ref_calls
def _read_records(self, file_path_and_block_tuple):
# type: (Tuple[str, Block]) -> Iterable[Variant]
"""Reads records from `file_path` in `block`."""
(file_path, block) = file_path_and_block_tuple
record_iterator = vcf_parser.PySamParser(
file_path,
block,
filesystems.CompressionTypes.GZIP,
self._allow_malformed_records,
representative_header_lines=self._representative_header_lines,
splittable_bgzf=True,
pre_infer_headers=self._pre_infer_headers,
sample_name_encoding=self._sample_name_encoding,
use_1_based_coordinate=self._use_1_based_coordinate,
move_hom_ref_calls=self._move_hom_ref_calls)
for record in record_iterator:
yield record
def expand(self, pcoll):
return (pcoll
| 'InputFiles' >> beam.Create(self._input_files)
| 'SplitSource' >> beam.FlatMap(bgzf_io.split_bgzf)
| 'Reshuffle' >> beam.Reshuffle()
| 'ReadBlock' >> beam.ParDo(self._read_records))
class ReadFromVcf(PTransform):
"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading VCF
files.
Parses VCF files (version 4) using the PySam library. If file_pattern specifies
multiple files, then the header from each file is used separately to parse
the content. However, the output will be a PCollection of
:class:`Variant` (or :class:`MalformedVcfRecord` for failed reads) objects.
"""
def __init__(
self,
file_pattern=None, # type: str
representative_header_lines=None, # type: List[str]
compression_type=CompressionTypes.AUTO, # type: str
validate=True, # type: bool
allow_malformed_records=False, # type: bool
pre_infer_headers=False, # type: bool
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH, # type: int
use_1_based_coordinate=False, # type: bool
move_hom_ref_calls=False, # type: bool
**kwargs # type: **str
):
# type: (...) -> None
"""Initialize the :class:`ReadFromVcf` transform.
Args:
file_pattern: The file path to read from either as a single file or a
glob pattern.
representative_header_lines: Header definitions to be used for parsing
VCF files. If supplied, header definitions in VCF files are ignored.
compression_type: Used to handle compressed input files. Typical value is
:attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
validate: flag to verify that the files exist during pipeline creation.
allow_malformed_records: If true, malformed records from VCF files will be
returned as :class:`MalformedVcfRecord` instead of failing the pipeline.
pre_infer_headers: If true, drop headers and make sure PySam returns the
exact data for variants and calls, without type matching.
sample_name_encoding: specify how we want to encode sample_name, mainly
to deal with the same sample_name being used across multiple VCF files.
use_1_based_coordinate: specify whether the coordinates should be stored
in BQ using 0-based exclusive (default) or 1-based inclusive
coordinates.
move_hom_ref_calls: If true, filter 0 GT data out of the call list and
add the call name to a hom_ref_calls column.
"""
super().__init__(**kwargs)
self._source = _VcfSource(
file_pattern,
representative_header_lines,
compression_type,
validate=validate,
allow_malformed_records=allow_malformed_records,
pre_infer_headers=pre_infer_headers,
sample_name_encoding=sample_name_encoding,
use_1_based_coordinate=use_1_based_coordinate,
move_hom_ref_calls=move_hom_ref_calls)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
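# Usage sketch (editor's addition; the bucket path is a placeholder):
#
#     with beam.Pipeline() as p:
#         variants = p | 'ReadVcf' >> ReadFromVcf('gs://my-bucket/*.vcf')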
def _create_vcf_source(
file_pattern=None,
representative_header_lines=None,
compression_type=None,
allow_malformed_records=None,
pre_infer_headers=False,
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH,
use_1_based_coordinate=False,
move_hom_ref_calls=False):
return _VcfSource(file_pattern=file_pattern,
representative_header_lines=representative_header_lines,
compression_type=compression_type,
allow_malformed_records=allow_malformed_records,
pre_infer_headers=pre_infer_headers,
sample_name_encoding=sample_name_encoding,
use_1_based_coordinate=use_1_based_coordinate,
move_hom_ref_calls=move_hom_ref_calls)
class ReadAllFromVcf(PTransform):
"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading a
:class:`~apache_beam.pvalue.PCollection` of VCF files.
Reads a :class:`~apache_beam.pvalue.PCollection` of VCF files or file patterns
and produces a PCollection :class:`Variant` (or
:class:`MalformedVcfRecord` for failed reads) objects.
This transform should be used when reading a massive number (>70,000) of
files.
"""
DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024 # 64MB
def __init__(
self,
representative_header_lines=None, # type: List[str]
desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE, # type: int
compression_type=CompressionTypes.AUTO, # type: str
allow_malformed_records=False, # type: bool
pre_infer_headers=False, # type: bool
sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH, # type: int
use_1_based_coordinate=False, # type: bool
move_hom_ref_calls=False, # type: bool
**kwargs # type: **str
):
# type: (...) -> None
"""Initialize the :class:`ReadAllFromVcf` transform.
Args:
representative_header_lines: Header definitions to be used for parsing VCF
files. If supplied, header definitions in VCF files are ignored.
desired_bundle_size: Desired size of bundles that should be generated when
splitting this source into bundles. See
:class:`~apache_beam.io.filebasedsource.FileBasedSource` for more
details.
compression_type: Used to handle compressed input files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
allow_malformed_records: If true, malformed records from VCF files will be
returned as :class:`MalformedVcfRecord` instead of failing the pipeline.
pre_infer_headers: If true, drop headers and make sure PySam returns the
exact data for variants and calls, without type matching.
sample_name_encoding: specify how we want to encode sample_name, mainly
to deal with the same sample_name being used across multiple VCF files.
use_1_based_coordinate: specify whether the coordinates should be stored
in BQ using 0-based exclusive (default) or 1-based inclusive
coordinates.
move_hom_ref_calls: If true, filter 0 GT data out of the call list and
add the call name to a hom_ref_calls column.
"""
super().__init__(**kwargs)
source_from_file = partial(
_create_vcf_source,
representative_header_lines=representative_header_lines,
compression_type=compression_type,
allow_malformed_records=allow_malformed_records,
pre_infer_headers=pre_infer_headers,
sample_name_encoding=sample_name_encoding,
use_1_based_coordinate=use_1_based_coordinate,
move_hom_ref_calls=move_hom_ref_calls)
self._read_all_files = filebasedsource.ReadAllFiles(
True, # splittable
CompressionTypes.AUTO, desired_bundle_size,
0, # min_bundle_size
source_from_file)
def expand(self, pvalue):
return pvalue | 'ReadAllFiles' >> self._read_all_files
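# Usage sketch (editor's addition; patterns are placeholders): feed a
# PCollection of file patterns into the transform.
#
#     patterns = p | beam.Create(['gs://a/*.vcf', 'gs://b/*.vcf'])
#     variants = patterns | 'ReadAll' >> ReadAllFromVcf()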
class WriteToVcf(PTransform):
"""A PTransform for writing to VCF files."""
def __init__(self,
file_path,
num_shards=1,
compression_type=CompressionTypes.AUTO,
headers=None,
bq_uses_1_based_coordinate=True):
# type: (str, int, str, List[str], bool) -> None
"""Initialize a WriteToVcf PTransform.
Args:
file_path: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards). The
file path should include the file extension (i.e. ".vcf", ".vcf.gz",
etc).
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
compression_type: Used to handle compressed output files. Typical value
for VCF files is CompressionTypes.UNCOMPRESSED. If set to
CompressionTypes.AUTO, file_path's extension will be used to detect
compression.
headers: A list of VCF meta-information lines describing at least the
INFO and FORMAT entries in each record, plus a header line describing
the column names. These lines will be written at the beginning of the
file.
bq_uses_1_based_coordinate: specify whether the coordinates used in BQ
are 1-based (default) or 0-based. To find out, examine the
start_position column description.
"""
self._file_path = file_path
self._num_shards = num_shards
self._compression_type = compression_type
self._header = headers and '\n'.join([h.strip() for h in headers]) + '\n'
self.bq_uses_1_based_coordinate = bq_uses_1_based_coordinate
def expand(self, pcoll):
return pcoll | 'WriteToVCF' >> textio.WriteToText(
self._file_path,
append_trailing_newlines=False,
num_shards=self._num_shards,
coder=_ToVcfRecordCoder(self.bq_uses_1_based_coordinate),
compression_type=self._compression_type,
header=self._header)
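# Usage sketch (editor's addition; header_lines is assumed to be a list of
# '##...' meta lines plus the '#CHROM...' header line):
#
#     variants | 'Write' >> WriteToVcf('/tmp/out.vcf', headers=header_lines)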
class _WriteVcfDataLinesFn(beam.DoFn):
"""A function that writes variants to one VCF file."""
def __init__(self, bq_uses_1_based_coordinate):
# type: (bool) -> None
"""Initialize _WriteVcfDataLinesFn DoFn function.
Args:
bq_uses_1_based_coordinate: specify whether the coordinates used in BQ
are 1-based (default) or 0-based. To find out, examine the
start_position column description.
"""
self._coder = _ToVcfRecordCoder(bq_uses_1_based_coordinate)
def process(self, file_path_and_variants_tuple, *args, **kwargs): # pylint: disable=unused-argument
# type: (Tuple[str, List[Variant]]) -> None
(file_path, variants) = file_path_and_variants_tuple
with filesystems.FileSystems.create(file_path) as file_to_write:
for variant in variants:
file_to_write.write(self._coder.encode(variant))
class WriteVcfDataLines(PTransform):
"""A PTransform for writing VCF data lines.
This PTransform takes PCollection<`file_path`, `variants`> as input, and
writes `variants` to `file_path`. The PTransform `WriteToVcf` takes
PCollection<`Variant`> as input, and writes all variants to the same file.
"""
def __init__(self, bq_uses_1_based_coordinate):
# type: (bool) -> None
"""Initialize WriteVcfDataLines PTransform.
Args:
bq_uses_1_based_coordinate: specify whether the coordinates used in BQ
are 1-based (default) or 0-based. To find out, examine the
start_position column description.
"""
self.bq_uses_1_based_coordinate = bq_uses_1_based_coordinate
def expand(self, pcoll):
return pcoll | 'WriteToVCF' >> beam.ParDo(_WriteVcfDataLinesFn(
self.bq_uses_1_based_coordinate))
|
googlegenomics/gcp-variant-transforms
|
gcp_variant_transforms/beam_io/vcfio.py
|
Python
|
apache-2.0
| 23,219
|
[
"pysam"
] |
5a2bdc6664560e85ff920085aa79b20483fee4057eced9be559e76919b5b7159
|
"""
Set of programs and tools to read the outputs from RH, 1.5D version
"""
import os
import datetime
import numpy as np
import xarray as xr
import h5py
class Rh15dout:
"""
Class to load and manipulate output from RH 1.5D.
"""
def __init__(self, fdir='.', verbose=True, autoread=True):
self.files = []
self.params = {}
self.verbose = verbose
self.fdir = fdir
if autoread:
for outfile in ["output_aux", "output_indata"]:
OUTFILE = os.path.join(self.fdir, "%s.hdf5" % (outfile))
self.read_groups(OUTFILE)
RAYFILE = os.path.join(self.fdir, "output_ray.hdf5")
self.read_ray(RAYFILE)
def read_groups(self, infile):
''' Reads indata file, group by group. '''
if not os.path.isfile(infile): # See if netCDF file exists
infile = os.path.splitext(infile)[0] + '.ncdf'
if not os.path.isfile(infile):
return
f = h5py.File(infile, "r")
GROUPS = [g for g in f.keys() if isinstance(f[g], h5py.Group)]
f.close()
for g in GROUPS:
setattr(self, g, xr.open_dataset(infile, group=g, autoclose=True))
self.files.append(getattr(self, g))
if self.verbose:
print(('--- Read %s file.' % infile))
def read_ray(self, infile=None):
''' Reads ray file. '''
if infile is None:
infile = '%s/output_ray.hdf5' % self.fdir
if not os.path.isfile(infile): # See if netCDF file exists
infile = os.path.splitext(infile)[0] + '.ncdf'
if not os.path.isfile(infile):
return
self.ray = xr.open_dataset(infile, autoclose=True)
self.files.append(self.ray)
if self.verbose:
print(('--- Read %s file.' % infile))
def close(self):
''' Closes the open files '''
for f in self.files:
f.close()
def __del__(self):
self.close()
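# Usage sketch (editor's addition): with the default autoread=True, the class
# above loads output_aux, output_indata, and output_ray from `fdir`, e.g.:
#
#     out = Rh15dout(fdir='.')
#     out.ray      # xarray Dataset read from output_ray.hdf5
#     out.close()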
class HDF5Atmos:
"""
Class to load and manipulate RH 1.5D input atmosphere files in HDF5.
"""
def __init__(self, infile):
self.file = read_hdf5(self, infile)
self.closed = False
def close(self):
try:
self.file.close()
self.closed = True
except RuntimeError:
print('(WWW) HDF5Atmos: input file already closed.')
def read(self, infile):
if not self.closed:
self.close()
self.file = read_hdf5(self, infile)
def write_multi(self, outfile, xi, yi, nti=0, writeB=False,
write_dscale=False, zcut=0, depth_optimise=False):
''' Writes MULTI atmosphere file from a column of the 3D model,
in RH 1.5D HDF5 format. Also writes the binary XDR file with magnetic
fields, if writeB is true.
'''
from .multi import watmos_multi
from .rh import write_B
writeB = writeB and self.params['has_B']
# if only total H available, will have to use rhpy (which is sometimes
# risky...)
if self.params['nhydr'] == 1:
try:
import rhpy
except ImportError:
raise ValueError("This function depents on rhpy, which is not"
" installed in this system.")
nh = rhpy.nh_lte(self.temperature[nti, xi, yi, zcut:].astype('Float64'),
self.electron_density[
nti, xi, yi, zcut:].astype('Float64'),
self.hydrogen_populations[
nti, 0, xi, yi, zcut:].astype('Float64'))
elif self.params['nhydr'] == 6:
nh = self.hydrogen_populations[nti, :, xi, yi, zcut:]
else:
raise ValueError("(EEE) write_multi: found %i hydrogen levels."
" For multi, need 6 or 1 " % self.params['nhydr'])
M_TO_CM3 = (100.)**3
M_TO_KM = 0.001
temp = self.temperature[nti, xi, yi, zcut:].copy()
ne = self.electron_density[nti, xi, yi, zcut:].copy() / M_TO_CM3
if len(self.z.shape) > 2:
self.z = self.z[:, xi, yi]
z = self.z[nti, zcut:].copy() * M_TO_KM * 1.e5 # in cm
vz = self.velocity_z[nti, xi, yi, zcut:].copy() * M_TO_KM
nh = nh / M_TO_CM3
if writeB:
bx = self.B_x[nti, xi, yi, zcut:].copy()
by = self.B_y[nti, xi, yi, zcut:].copy()
bz = self.B_z[nti, xi, yi, zcut:].copy()
else:
bx = by = bz = None
if depth_optimise:
rho = self.hydrogen_populations[
nti, 0, xi, yi, zcut:] * 2.380491e-24 / M_TO_CM3
res = depth_optim(z, temp, ne, vz, rho, nh=nh, bx=bx, by=by, bz=bz)
z, temp, ne, vz, rho, nh = res[:6]
if writeB:
bx, by, bz = res[6:]
watmos_multi(outfile, temp, ne, z * 1e-5, vz=vz, nh=nh,
write_dscale=write_dscale,
id='%s txy-slice: (t,x,y) = (%i,%i,%i)' %
(self.params['description'], nti, xi, yi))
if writeB:
write_B('%s.B' % outfile, bx, by, bz)
print(('--- Wrote magnetic field to %s.B' % outfile))
def write_multi_3d(self, outfile, nti=0, sx=None, sy=None, sz=None,
big_endian=False):
''' Writes atmosphere in multi_3d format (the same as the
pre-Jorrit multi3d) '''
from . import multi
ul = 1e2 # m to cm
uv = 1e-3 # m/s to km/s
# slicing and unit conversion
if sx is None:
sx = [0, self.nx, 1]
if sy is None:
sy = [0, self.ny, 1]
if sz is None:
sz = [0, self.nz, 1]
if self.params['nhydr'] > 1:
nh = np.mean(self.hydrogen_populations[nti, :, sx[0]:sx[1]:sx[2],
sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]], axis=1) / (ul**3)
else:
nh = self.hydrogen_populations[nti, 0, sx[0]:sx[1]:sx[2],
sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]] / (ul**3)
rho = nh * 2.380491e-24 # nH to rho [g cm-3]
x = self.x[sx[0]:sx[1]:sx[2]] * ul
y = self.y[sy[0]:sy[1]:sy[2]] * ul
z = self.z[nti, sz[0]:sz[1]:sz[2]] * ul
ne = self.electron_density[nti, sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]] / (ul**3)
temp = self.temperature[nti, sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]]
vz = self.velocity_z[nti, sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]] * uv
# write to file
multi.write_atmos3d(outfile, x, y, z, ne, temp, vz, rho=rho,
big_endian=big_endian)
class NcdfAtmos:
def __init__(self, infile):
self.file = read_ncdf(self, infile)
self.closed = False
def close(self):
try:
self.file.close()
self.closed = True
except RuntimeError:
print('(WWW) NcdfAtmos: input file already closed.')
def read(self, infile):
if not self.closed:
self.close()
self.file = read_ncdf(self, infile)
def write_multi(self, outfile, xi, yi, nti=0, writeB=False, write_dscale=False,
zcut=0, depth_optimise=False):
''' Writes MULTI atmosphere file from a column of the 3D model,
in RH 1.5D ncdf format. Also writes the binary XDR file with magnetic
fields, if writeB is true.
'''
from .multi import watmos_multi
from .rh import write_B
writeB = writeB and self.params['has_B']
# if only total H available, will have to use rhpy (which is sometimes
# risky...)
if self.params['nhydr'] == 1:
try:
import rhpy
except ImportError:
raise ValueError("This function depents on rhpy, which is not"
" installed in this system.")
nh = rhpy.nh_lte(self.temperature[nti, xi, yi, zcut:].astype('Float64'),
self.electron_density[
nti, xi, yi, zcut:].astype('Float64'),
self.hydrogen_populations[
nti, 0, xi, yi, zcut:].astype('Float64'))
elif self.params['nhydr'] == 6:
nh = self.hydrogen_populations[nti, :, xi, yi, zcut:]
else:
raise ValueError("(EEE) write_multi: found %i hydrogen levels."
" For multi, need 6 or 1 " % self.params['nhydr'])
M_TO_CM3 = (100.)**3
M_TO_KM = 0.001
temp = self.temperature[nti, xi, yi, zcut:].copy()
ne = self.electron_density[nti, xi, yi, zcut:].copy() / M_TO_CM3
if len(self.z.shape) > 2:
self.z = self.z[:, xi, yi]
z = self.z[nti, zcut:].copy() * M_TO_KM * 1.e5 # in cm
vz = self.velocity_z[nti, xi, yi, zcut:].copy() * M_TO_KM
nh = nh / M_TO_CM3
if writeB:
bx = self.B_x[nti, xi, yi, zcut:].copy()
by = self.B_y[nti, xi, yi, zcut:].copy()
bz = self.B_z[nti, xi, yi, zcut:].copy()
else:
bx = by = bz = None
if depth_optimise:
rho = self.hydrogen_populations[
nti, 0, xi, yi, zcut:] * 2.380491e-24 / M_TO_CM3
res = depth_optim(z, temp, ne, vz, rho, nh=nh, bx=bx, by=by, bz=bz)
z, temp, ne, vz, rho, nh = res[:6]
if writeB:
bx, by, bz = res[6:]
watmos_multi(outfile, temp, ne, z * 1e-5, vz=vz, nh=nh,
write_dscale=write_dscale,
id='%s txy-slice: (t,x,y) = (%i,%i,%i)' %
(self.params['description'], nti, xi, yi))
if writeB:
write_B('%s.B' % outfile, bx, by, bz)
print(('--- Wrote magnetic field to %s.B' % outfile))
def write_multi_3d(self, outfile, nti=0, sx=None, sy=None, sz=None,
big_endian=False):
''' Writes atmosphere in multi_3d format (the same as the
pre-Jorrit multi3d) '''
from . import multi
ul = 1e2 # m to cm
uv = 1e-3 # m/s to km/s
# slicing and unit conversion
if sx is None:
sx = [0, self.nx, 1]
if sy is None:
sy = [0, self.ny, 1]
if sz is None:
sz = [0, self.nz, 1]
if self.params['nhydr'] > 1:
nh = np.mean(self.hydrogen_populations[nti, :, sx[0]:sx[1]:sx[2],
sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]], axis=1) / (ul**3)
else:
nh = self.hydrogen_populations[nti, 0, sx[0]:sx[1]:sx[2],
sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]] / (ul**3)
rho = nh * 2.380491e-24 # nH to rho [g cm-3]
x = self.x[sx[0]:sx[1]:sx[2]] * ul
y = self.y[sy[0]:sy[1]:sy[2]] * ul
z = self.z[nti, sz[0]:sz[1]:sz[2]] * ul
ne = self.electron_density[nti, sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]] / (ul**3)
temp = self.temperature[nti, sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]]
vz = self.velocity_z[nti, sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
sz[0]:sz[1]:sz[2]] * uv
# write to file
multi.write_atmos3d(outfile, x, y, z, ne, temp, vz, rho=rho,
big_endian=big_endian)
#############################################################################
### TOOLS ###
#############################################################################
class DataHolder:
def __init__(self):
pass
def read_ncdf(inclass, infile):
''' DEPRECATED. Use read_hdf5 instead.
Reads NetCDF file into inclass, instance of any class.
Variables are read into class attributes, dimensions and attributes
are read into params dictionary. '''
from warnings import warn
import netCDF4 as nc
warn("Please use read_hdf5 instead", DeprecationWarning)
# internal attributes of NetCDF groups
ncdf_internals = dir(nc.Dataset)
if not os.path.isfile(infile):
raise IOError('read_ncdf: File %s not found' % infile)
f = nc.Dataset(infile, mode='r')
if 'params' not in dir(inclass):
inclass.params = {}
# add dimensions as attributes
for d in list(f.dimensions.keys()):
inclass.params[d] = len(f.dimensions[d])
# add attributes
attrs = [a for a in dir(f) if a not in ncdf_internals]
for att in attrs:
inclass.params[att] = getattr(f, att)
# add variables
for v in list(f.variables.keys()):
vname = v.replace(' ', '_') # sanitise string for spaces
setattr(inclass, vname, f.variables[v])
# Now do the same for all groups
for group in list(f.groups.keys()):
gname = group.replace(' ', '_') # sanitise string for spaces
setattr(inclass, gname, DataHolder())
cur_group = f.groups[group]
cur_class = getattr(inclass, gname)
# add variables
for v in list(cur_group.variables.keys()):
vname = v.replace(' ', '_') # sanitise string for spaces
setattr(cur_class, vname, cur_group.variables[v])
# add dimensions as attributes
for d in list(cur_group.dimensions.keys()):
inclass.params[d] = len(cur_group.dimensions[d])
# add attributes
attrs = [a for a in dir(cur_group) if a not in ncdf_internals]
for att in attrs:
inclass.params[att] = getattr(cur_group, att)
return f
def read_hdf5(inclass, infile):
"""
Reads HDF5/netCDF4 file into inclass, instance of any class.
Variables are read into class attributes, dimensions and attributes
are read into params dictionary.
"""
import h5py
if not os.path.isfile(infile):
raise IOError('read_hdf5: File %s not found' % infile)
f = h5py.File(infile, mode='r')
if 'params' not in dir(inclass):
inclass.params = {}
# add attributes
for att in f.attrs:
try:
inclass.params[att] = f.attrs[att]
except OSError: # catch errors where h5py cannot read UTF-8 strings
pass
# add variables and groups
for element in f:
name = element.replace(' ', '_') # sanitise string for spaces
if isinstance(f[element], h5py.Dataset):
setattr(inclass, name, f[element])
# special case for netCDF dimensions, add them to param list
if 'NAME' in f[element].attrs:
if f[element].attrs['NAME'][:20] == b'This is a netCDF dim':
inclass.params[element] = f[element].shape[0]
if isinstance(f[element], h5py.Group):
setattr(inclass, name, DataHolder())
cur_class = getattr(inclass, name)
cur_class.params = {}
for variable in f[element]: # add group variables
vname = variable.replace(' ', '_')
setattr(cur_class, vname, f[element][variable])
for att in f[element].attrs: # add group attributes
cur_class.params[att] = f[element].attrs[att]
return f
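# Usage sketch (editor's addition): attach the contents of an HDF5 atmosphere
# file to a bare object; the file name is a placeholder.
#
#     holder = DataHolder()
#     f = read_hdf5(holder, 'atmos.hdf5')
#     holder.params      # dimensions and attributes land here
#     f.close()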
def make_ncdf_atmos(outfile, T, vz, nH, z, x=None, y=None, Bz=None, By=None,
Bx=None, rho=None, ne=None, vx=None, vy=None, desc=None,
snap=None, boundary=[1, 0], comp=False, complev=2,
append=False):
"""
Creates NetCDF input file for rh15d.
IN:
outfile: string name of destination. If file exists it will be wiped.
T: Temperature. Its shape will determine the output dimensions
vz: Same shape as T. In m/s.
ne: Same shape as T. In m-3. Optional.
nH: Shape [6, shape.T]. In m-3.
z: Same shape as last index of T. In m.
x: Same shape as first index of T. In m.
y: Same shape as second index of T. In m.
snap: Snapshot number(s)
Bx, By, Bz: same shape as T. In T.
rho, vx, vy: same shape as T. Optional.
desc: Description string
boundary: Tuple with [bottom, top] boundary conditions. Key is:
0: Zero, 1: Thermalised, 2: Reflective.
append: if True, will append to existing file (if any).
comp: if True, compress the file.
complev: compression level.
"""
import netCDF4 as nc
mode = ['w', 'a']
if (append and not os.path.isfile(outfile)):
append = False
rootgrp = nc.Dataset(outfile, mode[append], format='NETCDF4')
nt = 1
if nH.shape == T.shape:
nhydr = 1
else:
nhydr = nH.shape[0]
idx = (None,) * (4 - len(T.shape)) + (Ellipsis,)  # promote 1D/2D/3D to 4D
T = T[idx]
if ne is not None:
ne = ne[idx]
nH = nH[idx]
vz = vz[idx]
z = z[idx]
if Bz is not None:
Bx = Bx[idx]
By = By[idx]
Bz = Bz[idx]
if rho is not None:
rho = rho[idx]
if vx is not None:
vx = vx[idx]
if vy is not None:
vy = vy[idx]
if len(T.shape) != 4:
raise ValueError('Invalid shape for T')
if snap is None:
snap = np.arange(nt, dtype='i4')
# for a new file, create dimensions and variables
if not append:
rootgrp.createDimension('nt', None) # create unlimited dimension
rootgrp.createDimension('nx', T.shape[-3])
rootgrp.createDimension('ny', T.shape[-2])
rootgrp.createDimension('nz', T.shape[-1])
rootgrp.createDimension('nhydr', nhydr)
T_var = rootgrp.createVariable('temperature', 'f4',
('nt', 'nx', 'ny', 'nz'), zlib=comp,
least_significant_digit=1,
complevel=complev)
vz_var = rootgrp.createVariable('velocity_z', 'f4',
('nt', 'nx', 'ny', 'nz'), zlib=comp,
least_significant_digit=1,
complevel=complev)
if ne is not None:
ne_var = rootgrp.createVariable('electron_density', 'f8',
('nt', 'nx', 'ny', 'nz'),
zlib=comp, complevel=complev)
ne_var.units = 'm^-3'
nh_var = rootgrp.createVariable('hydrogen_populations', 'f4',
('nt', 'nhydr', 'nx', 'ny', 'nz'),
zlib=comp, complevel=complev)
x_var = rootgrp.createVariable('x', 'f4', ('nx',))
y_var = rootgrp.createVariable('y', 'f4', ('ny',))
z_var = rootgrp.createVariable('z', 'f4', ('nt', 'nz'))
nt_var = rootgrp.createVariable('snapshot_number', 'i4', ('nt',))
T_var.units = 'K'
vz_var.units = 'm s^-1'
nh_var.units = 'm^-3'
z_var.units = 'm'
x_var.units = 'm'
y_var.units = 'm'
if Bz is not None:
bx_var = rootgrp.createVariable('B_x', 'f4',
('nt', 'nx', 'ny', 'nz'), zlib=comp,
least_significant_digit=5,
complevel=complev)
by_var = rootgrp.createVariable('B_y', 'f4',
('nt', 'nx', 'ny', 'nz'), zlib=comp,
least_significant_digit=5,
complevel=complev)
bz_var = rootgrp.createVariable('B_z', 'f4',
('nt', 'nx', 'ny', 'nz'), zlib=comp,
least_significant_digit=5,
complevel=complev)
bx_var.units = 'T'
by_var.units = 'T'
bz_var.units = 'T'
if rho is not None:
rho_var = rootgrp.createVariable('density', 'f4',
('nt', 'nx', 'ny', 'nz'), zlib=comp,
least_significant_digit=5,
complevel=complev)
rho_var.units = 'kg m^-3'
if vx is not None:
vx_var = rootgrp.createVariable('velocity_x', 'f4',
('nt', 'nx', 'ny', 'nz'), zlib=comp,
least_significant_digit=5,
complevel=complev)
vx_var.units = 'm s^-1'
if vy is not None:
vy_var = rootgrp.createVariable('velocity_y', 'f4',
('nt', 'nx', 'ny', 'nz'), zlib=comp,
least_significant_digit=5,
complevel=complev)
vy_var.units = 'm s^-1'
if desc is None:
rootgrp.description = \
"BIFROST snapshot"
else:
rootgrp.description = desc
if boundary is None:
rootgrp.boundary_top = 0
rootgrp.boundary_bottom = 1
else:
rootgrp.boundary_top = boundary[1]
rootgrp.boundary_bottom = boundary[0]
if Bz is None:
rootgrp.has_B = 0
else:
rootgrp.has_B = 1
nt = [0, nt]
else:
# get variables
T_var = rootgrp.variables['temperature']
vz_var = rootgrp.variables['velocity_z']
nh_var = rootgrp.variables['hydrogen_populations']
nt_var = rootgrp.variables['snapshot_number']
x_var = rootgrp.variables['x']
y_var = rootgrp.variables['y']
z_var = rootgrp.variables['z']
if ne is not None:
ne_var = rootgrp.variables['electron_density']
if Bz is not None:
bx_var = rootgrp.variables['B_x']
by_var = rootgrp.variables['B_y']
bz_var = rootgrp.variables['B_z']
if rho is not None:
rho_var = rootgrp.variables['density']
if vx is not None:
vx_var = rootgrp.variables['velocity_x']
if vy is not None:
vy_var = rootgrp.variables['velocity_y']
nti = len(rootgrp.dimensions['nt'])
nt = [nti, nti + nt]
T_var[nt[0]:nt[1]] = T
vz_var[nt[0]:nt[1]] = vz
nh_var[nt[0]:nt[1], :nhydr] = nH
if ne is not None:
ne_var[nt[0]:nt[1]] = ne
if Bz is not None:
bx_var[nt[0]:nt[1]] = Bx
by_var[nt[0]:nt[1]] = By
bz_var[nt[0]:nt[1]] = Bz
if rho is not None:
rho_var[nt[0]:nt[1]] = rho
if vx is not None:
vx_var[nt[0]:nt[1]] = vx
if vy is not None:
vy_var[nt[0]:nt[1]] = vy
x_var[:] = x
y_var[:] = y
z_var[nt[0]:nt[1]] = z
nt_var[nt[0]:nt[1]] = snap
rootgrp.close()
return
def make_hdf5_atmos(outfile, T, vz, nH, z, x=None, y=None, Bz=None, By=None,
Bx=None, rho=None, ne=None, vx=None, vy=None, vturb=None,
desc=None, snap=None, boundary=None, comp=None,
complev=None, append=False):
"""
Creates HDF5 input file for RH 1.5D.
Parameters
----------
outfile : string
Name of destination. If file exists it will be wiped.
T : n-D array
Temperature in K. Its shape will determine the output
dimensions. Shape is generally (nt, nx, ny, nz), but any
dimensions except nz can be omitted. Therefore the array can
be 1D, 2D, 3D, or 4D, but it will always be saved as 4D.
vz : n-D array
Line of sight velocity in m/s. Same shape as T.
nH : n-D array
Hydrogen populations in m^-3. Shape is (nt, nhydr, nx, ny, nz),
where nt, nx, ny can be omitted but must be consistent with
the shape of T. nhydr can be 1 (total number of protons) or
more (level populations).
z : n-D array
Height in m. Can have same shape as T (different height scale
for each column) or be only 1D (same height for all columns).
ne : n-D array, optional
Electron density in m^-3. Same shape as T.
rho : n-D array, optional
Density in kg m^-3. Same shape as T.
vx : n-D array, optional
x velocity in m/s. Same shape as T. Not in use by RH 1.5D.
vy : n-D array, optional
y velocity in m/s. Same shape as T. Not in use by RH 1.5D.
vturb : n-D array, optional
Turbulent velocity (microturbulence) in m/s. Not usually needed
for MHD models, and should only be used when a depth dependent
microturbulence is needed (constant microturbulence can be added
in RH).
Bx : n-D array, optional
Magnetic field in x dimension, in Tesla. Same shape as T.
By : n-D array, optional
Magnetic field in y dimension, in Tesla. Same shape as T.
Bz : n-D array, optional
Magnetic field in z dimension, in Tesla. Same shape as T.
x : 1-D array, optional
Grid distances in m. Same shape as first index of T.
y : 1-D array, optional
Grid distances in m. Same shape as second index of T.
snap : array-like, optional
Snapshot number(s).
desc : string, optional
Description of file
boundary : Tuple, optional
Tuple with [bottom, top] boundary conditions. Options are:
0: Zero, 1: Thermalised, 2: Reflective.
append : boolean, optional
If True, will append to existing file (if any).
comp : string, optional
Options are: None (default), 'gzip', 'szip', 'lzf'.
complev : integer or tuple, optional
Compression level. Integer for 'gzip', 2-tuple for szip.
"""
mode = ['w', 'a']
if (append and not os.path.isfile(outfile)):
append = False
rootgrp = h5py.File(outfile, mode=mode[append])
idx = (None,) * (4 - len(T.shape)) + (Ellipsis,)  # promote 1D/2D/3D to 4D
T = T[idx]
if ne is not None:
ne = ne[idx]
nH = nH[idx]
vz = vz[idx]
z = z[idx]
if Bz is not None:
Bx = Bx[idx]
By = By[idx]
Bz = Bz[idx]
if rho is not None:
rho = rho[idx]
if vx is not None:
vx = vx[idx]
if vy is not None:
vy = vy[idx]
if vturb is not None:
vturb = vturb[idx]
if len(T.shape) != 4:
raise ValueError('Invalid shape for T')
nt = T.shape[0]
nhydr = nH.shape[1]
if snap is None:
snap = np.arange(nt, dtype='i4')
if not append:
# for a new file, create datasets
max_dims = (None,) + T.shape[1:] # time is unlimited dimension
rootgrp.attrs["nx"] = T.shape[-3]
rootgrp.attrs["ny"] = T.shape[-2]
rootgrp.attrs["nz"] = T.shape[-1]
rootgrp.attrs["nhydr"] = nhydr
T_var = rootgrp.create_dataset("temperature", dtype="f4",
shape=T.shape, maxshape=max_dims,
fletcher32=True, compression=comp,
compression_opts=complev)
vz_var = rootgrp.create_dataset("velocity_z", dtype="f4",
shape=T.shape, maxshape=max_dims,
fletcher32=True, compression=comp,
compression_opts=complev)
nh_var = rootgrp.create_dataset("hydrogen_populations", dtype="f4",
shape=(nt, nhydr,) + T.shape[1:],
maxshape=(None, nhydr) + T.shape[1:],
fletcher32=True, compression=comp,
compression_opts=complev)
if ne is not None:
ne_var = rootgrp.create_dataset("electron_density", dtype="f8",
shape=T.shape, maxshape=max_dims,
fletcher32=True, compression=comp,
compression_opts=complev)
ne_var.attrs["units"] = 'm^-3'
x_var = rootgrp.create_dataset("x", dtype="f4", shape=(T.shape[1],))
y_var = rootgrp.create_dataset("y", dtype="f4", shape=(T.shape[2],))
if len(z.shape) == 4:
z_var = rootgrp.create_dataset("z", dtype="f4",
shape=T.shape, maxshape=max_dims,
fletcher32=True, compression=comp,
compression_opts=complev)
elif len(z.shape) == 2:
z_var = rootgrp.create_dataset("z", dtype="f4",
shape=(nt, T.shape[3]),
maxshape=(None, T.shape[3]))
else:
raise ValueError("Invalid shape for z scale")
nt_var = rootgrp.create_dataset("snapshot_number", dtype="i4",
shape=(nt,))
T_var.attrs["units"] = 'K'
vz_var.attrs["units"] = 'm s^-1'
nh_var.attrs["units"] = 'm^-3'
z_var.attrs["units"] = 'm'
x_var.attrs["units"] = 'm'
y_var.attrs["units"] = 'm'
if Bz is not None:
bx_var = rootgrp.create_dataset("B_x", dtype="f4",
shape=T.shape, maxshape=max_dims,
fletcher32=True, compression=comp,
compression_opts=complev)
by_var = rootgrp.create_dataset("B_y", dtype="f4",
shape=T.shape, maxshape=max_dims,
fletcher32=True, compression=comp,
compression_opts=complev)
bz_var = rootgrp.create_dataset("B_z", dtype="f4",
shape=T.shape, maxshape=max_dims,
fletcher32=True, compression=comp,
compression_opts=complev)
bx_var.attrs["units"] = 'T'
by_var.attrs["units"] = 'T'
bz_var.attrs["units"] = 'T'
if rho is not None:
rho_var = rootgrp.create_dataset("density", dtype="f4",
shape=T.shape, maxshape=max_dims,
fletcher32=True, compression=comp,
compression_opts=complev)
rho_var.attrs["units"] = 'kg m^-3'
if vx is not None:
vx_var = rootgrp.create_dataset("velocity_x", dtype="f4",
shape=T.shape, maxshape=max_dims,
fletcher32=True, compression=comp,
compression_opts=complev)
vx_var.attrs["units"] = 'm s^-1'
if vy is not None:
vy_var = rootgrp.create_dataset("velocity_y", dtype="f4",
shape=T.shape, maxshape=max_dims,
fletcher32=True, compression=comp,
compression_opts=complev)
vy_var.attrs["units"] = 'm s^-1'
if vturb is not None:
vt_var = rootgrp.create_dataset("velocity_turbulent", dtype="f4",
shape=T.shape, maxshape=max_dims,
fletcher32=True, compression=comp,
compression_opts=complev)
vt_var.attrs["units"] = 'm s^-1'
if desc is None:
rootgrp.attrs["description"] = ("Created with make_hdf5_atmos "
"on %s" % datetime.datetime.now())
else:
rootgrp.attrs["description"] = desc
if boundary is None:
rootgrp.attrs["boundary_top"] = 0
rootgrp.attrs["boundary_bottom"] = 1
else:
rootgrp.attrs["boundary_top"] = boundary[1]
rootgrp.attrs["boundary_bottom"] = boundary[0]
if Bz is None:
rootgrp.attrs["has_B"] = 0
else:
rootgrp.attrs["has_B"] = 1
nt = [0, nt]
else:
# get variables
T_var = rootgrp['temperature']
vz_var = rootgrp['velocity_z']
nh_var = rootgrp['hydrogen_populations']
nt_var = rootgrp['snapshot_number']
x_var = rootgrp['x']
y_var = rootgrp['y']
z_var = rootgrp['z']
if ne is not None:
ne_var = rootgrp['electron_density']
if Bz is not None:
bx_var = rootgrp['B_x']
by_var = rootgrp['B_y']
bz_var = rootgrp['B_z']
if rho is not None:
rho_var = rootgrp['density']
if vx is not None:
vx_var = rootgrp['velocity_x']
if vy is not None:
vy_var = rootgrp['velocity_y']
if vturb is not None:
vt_var = rootgrp['velocity_turbulent']
nti = int(rootgrp.attrs['nt'])
nt = [nti, nti + nt]
T_var[nt[0]:nt[1]] = T
vz_var[nt[0]:nt[1]] = vz
nh_var[nt[0]:nt[1], :nhydr] = nH
if ne is not None:
ne_var[nt[0]:nt[1]] = ne
if Bz is not None:
bx_var[nt[0]:nt[1]] = Bx
by_var[nt[0]:nt[1]] = By
bz_var[nt[0]:nt[1]] = Bz
if rho is not None:
rho_var[nt[0]:nt[1]] = rho
if vx is not None:
vx_var[nt[0]:nt[1]] = vx
if vy is not None:
vy_var[nt[0]:nt[1]] = vy
if vturb is not None:
vt_var[nt[0]:nt[1]] = vturb
if x is not None:
x_var[:] = x
if y is not None:
y_var[:] = y
z_var[nt[0]:nt[1]] = z
nt_var[nt[0]:nt[1]] = snap
rootgrp.attrs['nt'] = z_var.shape[0]
rootgrp.close()
return
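

# Example (hypothetical usage sketch added for illustration; not part of the
# original module). Shows a minimal call to make_hdf5_atmos with a synthetic
# 2x2-column atmosphere; keyword names follow the docstring above, and all
# values are made up. Assumes numpy (np) is imported at module level.
def _example_make_hdf5_atmos():
    nx, ny, nz = 2, 2, 50
    T = np.tile(np.linspace(1e4, 5e3, nz), (nx, ny, 1))  # K, shape (nx, ny, nz)
    vz = np.zeros_like(T)                                # static atmosphere, m/s
    ne = np.full_like(T, 1e17)                           # electron density, m^-3
    nH = np.full((1, nx, ny, nz), 1e18)                  # nhydr=1 (protons), m^-3
    z = np.linspace(2e6, 0.0, nz)                        # height scale, m
    make_hdf5_atmos('example_atmos.hdf5', T=T, vz=vz, nH=nH, z=z, ne=ne,
                    x=np.arange(nx) * 1e5, y=np.arange(ny) * 1e5,
                    desc='Synthetic test atmosphere')
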
def make_xarray_atmos(outfile, T, vz, z, nH=None, x=None, y=None, Bz=None, By=None,
Bx=None, rho=None, ne=None, vx=None, vy=None, vturb=None,
desc=None, snap=None, boundary=None, append=False):
"""
Creates HDF5 input file for RH 1.5D using xarray.
Parameters
----------
outfile : string
        Name of destination. If the file exists and append is False, it
        will be wiped.
T : n-D array
Temperature in K. Its shape will determine the output
dimensions. Shape is generally (nt, nx, ny, nz), but any
dimensions except nz can be omitted. Therefore the array can
        be 1D, 2D, 3D, or 4D, but it will ultimately always be saved as 4D.
vz : n-D array
Line of sight velocity in m/s. Same shape as T.
z : n-D array
Height in m. Can have same shape as T (different height scale
for each column) or be only 1D (same height for all columns).
nH : n-D array, optional
Hydrogen populations in m^-3. Shape is (nt, nhydr, nx, ny, nz),
where nt, nx, ny can be omitted but must be consistent with
the shape of T. nhydr can be 1 (total number of protons) or
more (level populations). If nH is not given, rho must be given!
ne : n-D array, optional
Electron density in m^-3. Same shape as T.
rho : n-D array, optional
Density in kg m^-3. Same shape as T. Only used if nH is not given.
vx : n-D array, optional
x velocity in m/s. Same shape as T. Not in use by RH 1.5D.
vy : n-D array, optional
y velocity in m/s. Same shape as T. Not in use by RH 1.5D.
    vturb : n-D array, optional
        Turbulent velocity (microturbulence) in m/s. Same shape as T. Not
        usually needed for MHD models; use only when a depth-dependent
        microturbulence is required (a constant microturbulence can be
        added in RH).
Bx : n-D array, optional
Magnetic field in x dimension, in Tesla. Same shape as T.
By : n-D array, optional
Magnetic field in y dimension, in Tesla. Same shape as T.
Bz : n-D array, optional
Magnetic field in z dimension, in Tesla. Same shape as T.
    x : 1-D array, optional
        Grid distances in m. Same length as T.shape[1] (nx).
    y : 1-D array, optional
        Grid distances in m. Same length as T.shape[2] (ny).
snap : array-like, optional
Snapshot number(s).
desc : string, optional
Description of file
    boundary : tuple, optional
        Tuple with [bottom, top] boundary conditions. Options are:
        0: Zero, 1: Thermalised, 2: Reflective. Defaults to [1, 0]
        (thermalised bottom, zero top).
append : boolean, optional
If True, will append to existing file (if any).
"""
data = {'temperature': [T, 'K'],
'velocity_z': [vz, 'm s^-1'],
'velocity_y': [vy, 'm s^-1'],
'velocity_x': [vx, 'm s^-1'],
'electron_density': [ne, 'm^-3'],
'hydrogen_populations': [nH, 'm^-3'],
'density': [rho, 'kg m^-3'],
'B_x': [Bx, 'T'],
'B_y': [By, 'T'],
'B_z': [Bz, 'T'],
'velocity_turbulent': [vturb, 'm s^-1'],
'x': [x, 'm'],
'y': [y, 'm'],
'z': [z, 'm']}
    VARS4D = ['temperature', 'B_x', 'B_y', 'B_z', 'density', 'velocity_x',
              'velocity_y', 'velocity_z', 'velocity_turbulent',
              'electron_density']
# Remove variables not given
data = {key: data[key] for key in data if data[key][0] is not None}
if (nH is None) and (rho is None):
raise ValueError("Missing nH or rho. Need at least one of them")
if (append and not os.path.isfile(outfile)):
append = False
    # tuple (not list) index: pads empty leading axes for 1D/2D/3D input
    idx = tuple([None] * (4 - len(T.shape)) + [Ellipsis])
for var in data:
if var not in ['x', 'y']: # these are always 1D
data[var][0] = data[var][0][idx]
if len(data['temperature'][0].shape) != 4:
raise ValueError('Invalid shape for T')
nt, nx, ny, nz = data['temperature'][0].shape
if boundary is None:
boundary = [1, 0]
    if snap is None:
        data['snapshot_number'] = [np.arange(nt, dtype='i4'), '']
    else:
        data['snapshot_number'] = [np.atleast_1d(np.asarray(snap, dtype='i4')), '']
if not append:
variables = {}
coordinates = {}
for v in data:
if v in VARS4D:
variables[v] = (('snapshot_number', 'x', 'y', 'depth'),
data[v][0], {'units': data[v][1]})
elif v == 'hydrogen_populations':
variables[v] = (('snapshot_number', 'nhydr', 'x', 'y', 'depth'),
data[v][0], {'units': data[v][1]})
elif v == 'z':
dims = ('snapshot_number', 'depth')
if len(data[v][0].shape) == 1: # extra dim for nt dependency
data[v][0] = data[v][0][None, :]
elif len(data[v][0].shape) == 4:
dims = ('snapshot_number', 'x', 'y', 'depth')
coordinates[v] = (dims, data[v][0], {'units': data[v][1]})
elif v in ['x', 'y', 'snapshot_number']:
coordinates[v] = ((v), data[v][0], {'units': data[v][1]})
attrs = {"comment": ("Created with make_xarray_atmos "
"on %s" % datetime.datetime.now()),
"boundary_top": boundary[1], "boundary_bottom": boundary[0],
"has_B": int(Bz is not None), "description": str(desc),
"nx": nx, "ny": ny, "nz": nz, "nt": nt}
data = xr.Dataset(variables, coordinates, attrs)
        data.to_netcdf(outfile, mode='w', format='NETCDF4',
                       unlimited_dims=('snapshot_number',))
else: # use h5py to append existing file
rootgrp = h5py.File(outfile, mode='a')
        nti = int(rootgrp.attrs['nt'])
        rootgrp.attrs['nt'] = nti + nt  # add appended number of snapshots
for var in data:
if var in VARS4D + ['hydrogen_populations', 'z', 'snapshot_number']:
rootgrp[var].resize(nti + nt, axis=0)
rootgrp[var][nti:nti + nt] = data[var][0][:]
rootgrp.close()
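

# Example (hypothetical usage sketch added for illustration; not part of the
# original module). Writes the same kind of synthetic atmosphere through the
# xarray path, using rho instead of nH to exercise the density-only branch.
def _example_make_xarray_atmos():
    nx, ny, nz = 2, 2, 50
    T = np.tile(np.linspace(1e4, 5e3, nz), (nx, ny, 1))  # K
    vz = np.zeros_like(T)                                # m/s
    rho = np.full_like(T, 1e-8)                          # kg m^-3
    z = np.linspace(2e6, 0.0, nz)                        # m
    make_xarray_atmos('example_atmos.nc', T, vz, z, rho=rho,
                      x=np.arange(nx) * 1e5, y=np.arange(ny) * 1e5,
                      desc='Synthetic test atmosphere')
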
def depth_optim(height, temp, ne, vz, rho, nh=None, bx=None, by=None, bz=None,
tmax=5e4):
"""
Performs depth optimisation of one single column (as per multi_3d).
IN:
height [cm]
temp [K]
ne [cm-3]
vz [any]
rho [g cm-3]
nh [any] (optional)
bx,by,bz [any] (optional)
tmax [K] maximum temperature of the first point
"""
    try:   # cumtrapz was renamed cumulative_trapezoid (removed in SciPy 1.14)
        from scipy.integrate import cumulative_trapezoid as cumtrapz
    except ImportError:
        from scipy.integrate import cumtrapz
    import scipy.interpolate as interp
ndep = len(height)
# calculate optical depth from H-bf only
taumax = 100
grph = 2.26e-24
crhmbf = 2.9256e-17
ee = 1.602189E-12
bk = 1.380662E-16
xhbf = 1.03526e-16 * ne * crhmbf / temp**1.5 * \
np.exp(0.754 * ee / bk / temp) * rho / grph
tau = np.concatenate(([0.], cumtrapz(xhbf, -height)))
idx = (tau < taumax) & (temp < tmax)
# find maximum variance of T, rho, and tau for each depth
tt = temp[idx]
rr = rho[idx]
ta = tau[idx]
tdiv = np.abs(np.log10(tt[1:]) - np.log10(tt[:-1])) / np.log10(1.1)
rdiv = np.abs(np.log10(rr[1:]) - np.log10(rr[:-1])) / np.log10(1.1)
taudiv = np.abs(np.log10(ta[1:]) - np.log10(ta[:-1])) / 0.1
taudiv[0] = 0.
aind = np.concatenate(
([0.], np.cumsum(np.max(np.array([tdiv, rdiv, taudiv]), axis=0))))
aind *= (ndep - 1) / aind[-1]
    # interpolate new height so it is evenly spaced in aind
nheight = interp.splev(np.arange(ndep), interp.splrep(
aind, height[idx], k=3, s=0), der=0)
# interpolate quantities for new depth scale
ntemp = np.exp(interp.splev(nheight, interp.splrep(height[::-1], np.log(temp[::-1]),
k=3, s=0), der=0))
nne = np.exp(interp.splev(nheight, interp.splrep(height[::-1], np.log(ne[::-1]),
k=3, s=0), der=0))
nrho = np.exp(interp.splev(nheight, interp.splrep(height[::-1], np.log(rho[::-1]),
k=3, s=0), der=0))
nvz = interp.splev(nheight, interp.splrep(height[::-1], vz[::-1],
k=3, s=0), der=0)
result = [nheight, ntemp, nne, nvz, nrho]
if nh is not None:
for k in range(nh.shape[0]):
nh[k] = np.exp(interp.splev(nheight,
interp.splrep(height[::-1],
np.log(nh[k, ::-1]), k=3,
s=0), der=0))
result += [nh]
if bx is not None:
nbx = interp.splev(nheight, interp.splrep(
height[::-1], bx[::-1], k=3, s=0), der=0)
nby = interp.splev(nheight, interp.splrep(
height[::-1], by[::-1], k=3, s=0), der=0)
nbz = interp.splev(nheight, interp.splrep(
height[::-1], bz[::-1], k=3, s=0), der=0)
result += [nbx, nby, nbz]
return result
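

# Example (hypothetical usage sketch added for illustration; not part of the
# original module). Runs depth_optim on one synthetic column; note the CGS
# units this function expects (cm, cm^-3, g cm^-3). All values are made up.
def _example_depth_optim():
    ndep = 200
    height = np.linspace(2e8, 0.0, ndep)   # cm, decreasing with depth index
    temp = np.linspace(1e4, 5e3, ndep)     # K
    ne = np.full(ndep, 1e11)               # cm^-3
    vz = np.zeros(ndep)
    rho = np.full(ndep, 1e-8)              # g cm^-3
    nheight, ntemp, nne, nvz, nrho = depth_optim(height, temp, ne, vz, rho)
    return nheight, ntemp, nne, nvz, nrho
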
def make_wave_file(outfile, start=None, end=None, step=None, new_wave=None,
ewave=None, air=True):
"""
Writes RH wave file (in xdr format). All wavelengths should be in nm.
Parameters
----------
    outfile : string
        Name of file to write.
    start : number, optional
        Starting wavelength.
    end : number, optional
        Ending wavelength (non-inclusive).
    step : number, optional
        Wavelength separation.
    new_wave : 1-D array, optional
        Alternatively to start/end/step, one can specify an array of
        wavelengths here.
    ewave : 1-D array, optional
        Array of existing wavelengths. Points of new_wave that fall closer
        than about 0.375 * step to an existing wavelength are discarded,
        so the step size is roughly preserved when both sets are combined.
    air : boolean, optional
        If True, the wavelengths are assumed to be given in air and are
        converted to vacuum wavelengths before writing.
    """
    import xdrlib  # deprecated since Python 3.11 and removed in 3.13
    from ..utils.waveconv import waveconv
    if new_wave is None:
        if None in [start, end, step]:
            raise ValueError('Must specify either new_wave, or start, end, '
                             'step. Stopping.')
        new_wave = np.arange(start, end, step)
if step is None:
step = np.median(np.diff(new_wave))
if ewave is not None: # ensure step is kept at most times
keepers = []
for w in new_wave:
if np.min(np.abs(w - ewave)) > step * 0.375:
keepers.append(w)
new_wave = np.array(keepers)
if air:
new_wave = waveconv(new_wave, mode='air2vac')
# write file
p = xdrlib.Packer()
nw = len(new_wave)
p.pack_int(nw)
p.pack_farray(nw, new_wave.astype('d'), p.pack_double)
f = open(outfile, 'wb')
f.write(p.get_buffer())
f.close()
print(("Wrote %i wavelengths to file." % nw))
return
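

# Example (hypothetical usage sketch added for illustration; not part of the
# original module). Writes a regular 1 pm grid covering 0.2 nm around the
# Ca II 854.2 nm line; wavelengths are given in air and converted to vacuum.
def _example_make_wave_file():
    make_wave_file('example.wave', start=854.1, end=854.3, step=0.001)
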
def read_wave_file(infile):
"""
Reads RH wavelength file.
Parameters
----------
infile - string
Name of wavelength file to read.
"""
import xdrlib
import io
from .rh import read_xdr_var
f = io.open(infile, 'rb')
buf = xdrlib.Unpacker(f.read())
f.close()
nw = read_xdr_var(buf, 'i')
return read_xdr_var(buf, ('d', (nw,)))
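

# Example (hypothetical usage sketch added for illustration; not part of the
# original module). Round-trip test: write a wave file, then read it back.
def _example_read_wave_file():
    make_wave_file('example.wave', start=854.1, end=854.3, step=0.001)
    wave = read_wave_file('example.wave')  # vacuum wavelengths in nm
    print("Read %i wavelengths back." % len(wave))
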
def clean_var(data, only_positive=True):
"""
Cleans a 2D or 3D variable filled with NaNs and other irregularities.
"""
from ..utils import utilsfast
data = np.ma.masked_invalid(data, copy=False)
if only_positive:
data = np.ma.masked_less(data, 0., copy=False)
tmp = np.abs(data)
thres = tmp.mean() + tmp.std() * 4 # points more than 4 std away
data = np.ma.masked_where(tmp > thres, data, copy=False)
if data.ndim == 2:
data = data[..., np.newaxis]
for k in range(data.shape[-1]):
tmp = data[..., k].astype("d")
tmp[data[..., k].mask] = np.nan
data[..., k] = utilsfast.replace_nans(tmp, 15, 0.1, 3, "localmean")
return np.squeeze(data)
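

# Example (hypothetical usage sketch added for illustration; not part of the
# original module). Cleans a synthetic 2-D map containing a NaN and a
# negative pixel; requires helita's compiled utilsfast extension.
def _example_clean_var():
    rng = np.random.default_rng(0)
    data = rng.lognormal(size=(50, 50))   # positive-valued synthetic map
    data[10, 10] = np.nan                 # simulated bad pixel
    data[20, 20] = -1.0                   # unphysical negative value
    return clean_var(data)                # bad points interpolated over
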
|
M1kol4j/helita
|
helita/sim/rh15d.py
|
Python
|
bsd-3-clause
| 47,086
|
[
"NetCDF"
] |
c236a4d7842ada67c62ab13409b6c5ec554b7678fe62d007434acfa84773dd81
|
# -*- coding: utf-8 -*-
{
"'Sounds-like' name search allowing search even the spelling of the name is not known exactly": "'Sounds-like'名稱搜尋或搜尋,即使名稱的拼字不完全",
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": '一个位置指定地理區域的這个區域。 這可以是位置的位置階層,或"群組位置",或位置有界限的區域。',
"Acronym of the organization's name, eg. IFRC.": '縮寫的組織的名稱,例如: IFRC。',
"Authenticate system's Twitter account": '系统的鉴別Twitter账户',
"Can't import tweepy": '無法匯入tweepy',
"Caution: doesn't respect the framework rules!": '警告:不符合架构規則!',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": '按一下"抵押"按鈕左手邊的直欄來進行抵押以符合要求的协助。',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": '詳細地址的站點的参考/后勤的用途。 請注意,您可以新增GIS/對映資料中的關于此站台"位置"欄位下面的說明。',
"Facilitate uploading of missing person's photograph": '促進上传失蹤人口的照片',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": '清單格式屬性的值" & RGB值用于為JSON物件,例如: {0}紅色: \'#FF0000,綠色: \'#00FF00,黃色: \'#FFFF00的',
"Grouping by 'Family Unit' or other group category": '分組\'系列單元"或"其他"群組種類',
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": '如果選取,則此資產的位置將會被更新時,人員的位置已更新。',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": '如果此配置代表一个區域的區域功能表上,請提供一个名稱,以使用在功能表中。 名稱的个人對映配置將會設為使用者的名稱。',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": '如果這个欄位會移入,則使用者指定此組織時,註冊將指定為一个人員的組織,除非它們的網域不符合網域欄位。',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": '如果這是起來,則這會成為使用者的基本位置和因此使用者在地圖上顯示',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": '如果啟用了這項設定,則所有刪除的記錄只是標示為刪除而確定刪除。 它們會顯示在原始資料庫存取,但不會看到一般使用者。',
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": '如果您無法找到該記錄的人員您要報告丟失了,您可以將它新增至按一下"新增人員"如下:',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": '如果您沒有看到"以在清單中,您可以新增一个新的按一下鏈結新增醫院。',
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": '如果您沒有看到"辦事處清單中,您可以新增一个新的按一下鏈結新增Out of Office。',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": '如果您沒有看到"組織清單中,您可以新增一个新的按一下鏈結新增組織。',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": '而自動從其他同步對等網路上,您也可以同步檔案,這是必需的,沒有網路。 您可以利用這个頁面來匯入同步檔案資料,匯出資料要同步化的檔案。 上的鏈結,按一下滑鼠右鍵,前往這个頁面。',
"Level is higher than parent's": '母項的層次高于',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "Nb SMS要求過濾,只是一个actionable',時, Tweet要求過濾,因此可能會是一个好開始搜尋。",
"Need a 'url' argument!": "需要一个'URL'引數!",
"Note that the dropdowns won't refresh automatically. Refresh the page if you wish to verify that the locations have gone.": '請注意,清單不會自動重新整理。 如果您想要重新整理頁面,以驗證"位置不存在。',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "選用。 幾何形狀的名稱直欄。 在PostGIS預設為'the_geom'.",
"Parent level should be higher than this record's level. Parent level is": '母項層次應該高于此記錄的層次。 母項層次是',
"Password fields don't match": '密碼欄位不符',
"Phone number to donate to this organization's relief efforts.": '捐贈撥打電話號碼這个組織的釋放工作。',
"Please come back after sometime if that doesn't help.": '請回到之后,如果該時間不說明。',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": '按下"刪除舊的按鈕,使所有記錄参照此一个被repointed在新的一个,則舊記錄將被刪除。',
"Quantity in %s's Inventory": '以百分比的庫存數量',
"Search here for a person's record in order to:": '搜尋這裡的人員的記錄,以便:',
"Select a Room from the list or click 'Create Room'": '選取會議室從清單,或按一下新增空間"',
"Select a person in charge for status 'assigned'": "選取一个人員負責的狀態'指定的'",
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": '選取這个如果所有特定位置需要母項在最深層次的位置階層。 例如,如果"地區"的最小部門階層中,則所有特定位置所需的要區域作為母項。',
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": '選取這个如果所有特定位置需要一个母項位置階層。 這可协助在設定"地區"代表一个受影响的區域。',
"Sorry, things didn't get done on time.": '抱歉,項目沒有取得完成的時間。',
"Sorry, we couldn't find that page.": '很抱歉,我們找不到該頁面。',
"System's Twitter account updated": '系统的Twitter更新账户',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": '在Donor(S)適用于這个專案。 可以選取多个值,請按住控制鍵。',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": '部門(s)此組織運作中。 可以選取多个值,請按住控制鍵。',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": '映像檔的URL。 如果您不上传影像檔案,則您必须指定其位置在這裡。',
"The person's manager within this Office/Project.": '人員的管理員在這个辦事處/專案。',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '搜尋人員名稱,輸入任何的第一个,中間或最后一个名稱,以空格區隔。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的人。',
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": '要搜尋的主体,請輸入ID標籤的主体。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的主体。',
"To search for a hospital, enter any of the names or IDs of the hospital, or the organisation name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '要搜尋的醫院,輸入的任何名稱或ID的醫院,或組織名稱或縮寫,以空格區隔。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,以列出所有醫院。',
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '要搜尋的醫院,輸入的任何名稱或ID的醫院,以空格區隔。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,以列出所有醫院。',
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '要搜尋的醫院,輸入的任何部分的名稱或ID。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,以列出所有醫院。',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": '要搜尋的位置,輸入該名稱。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的位置。',
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '搜尋人員,輸入任何的第一个,中間或最后一个名稱和/或ID號碼的人員,以空格區隔。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的人。',
"To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '搜尋人員,輸入任何的第一个,中間或最后一个名稱,以空格區隔。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的人。',
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": '搜尋要求時,輸入您要尋找的部分文字。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的要求。',
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": '要搜尋的評估,輸入任何部分的票据號碼的評估。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,以列出所有評估。',
"Type the first few characters of one of the Person's names.": '輸入前幾个字元的其中一个人員的名稱。',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": '上传影像檔案在這裡。 如果您不上传影像檔案,則您必须指定其位置在URL欄位中。',
"View and/or update details of the person's record": '檢視及/或更新詳細資料的人員的記錄',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": '檢視/編輯資料庫直接(警告:不符合架构規則! )',
"What are the people's normal ways to obtain food in this area?": '在這個地區通常大家怎麼取得食物?',
"What should be done to reduce women and children's vulnerability to violence?": '如何做才能減少婦女和小孩遭受暴力?',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": '當與他人同步數據 ,兩個(或多方)要同步的資料都已經修改的情況下衝突發生,即信息相互矛盾。 同步模組嘗試解析這類冲突自動,但是在某些情湟下不能。 在這些情湟下,您有來解决這些冲突,請手動按一下上的鏈結,才能進入這个頁面。',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click": '您已經設定密碼,因此在這裡進行的變更不會顯示給您。 若要變更您的設定,請按一下个人化',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "您有未儲存的變更。 現在按一下'取消',然后'儲存',以儲存它們。 按一下確定以立即舍棄它們。",
"You haven't made any calculations": '您尚未進行任何計算',
"couldn't be parsed so NetworkLinks not followed.": '無法剖析,因此NetworkLinks不遵循。',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": '包括一个GroundOverlay或ScreenOverlay都不支援在OpenLayers尚未,因此可能無法正常運作。',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update"是選用的表示式類似"field1=\'newvalue\'"。您不能更新或刪除結果的結合',
'# of Houses Damaged': '損壞的房屋數',
'# of Houses Destroyed': '損毀的房屋數',
'# of International Staff': '国際人員的人數',
'# of National Staff': '#的国家人員',
'# of People Affected': '#的人員分配',
'# of People Deceased': '#的人員死亡',
'# of People Injured': '#的人員受傷',
'# of Vehicles': '#的媒介',
'%(count)s rows deleted': '%(count)s已刪除的橫列',
'%(count)s rows updated': '%(count)s已更新的橫列',
'%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nIf的要求類型是"%(type)s",請輸入 %(type)s 在下一个畫面。',
'%(system_name)s - Verify Email': '%(system_name)s - 驗證電子郵件',
'%.1f km': '%.1f公里',
'& then click on the map below to adjust the Lat/Lon fields': '&,然后按一下"對映"下面的調整平面/長欄位',
'* Required Fields': '* 必填欄位',
'0-15 minutes': '〇-15分鐘',
'1 Assessment': '一評量',
'1 location, shorter time, can contain multiple Tasks': '一位置,較短的時間,可以包含多个作業',
'1-3 days': '1-3 天',
'1. Fill the necessary fields in BLOCK letters.': '一,填入必要的欄位區塊字母。',
'15-30 minutes': '15-30分鐘',
'2 different options are provided here currently:': '二个不同的選項此處提供目前:',
'2. Always use one box per letter and leave one box space to seperate words.': '二一律使用有一个方框依字母,并保留空間有一个方框來分隔文字。',
'2x4 Car': '2x4車',
'30-60 minutes': '30-60分鐘',
'4-7 days': '四-七天',
'4x4 Car': '電腦(4x4)車',
'8-14 days': '八-14天',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': '一个標記,指派給个別位置設定時需要置換的記號指派給功能類別。',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': '一个参考文件,如:檔案, URL或聯絡人,以驗證這項資料。 您可以鍵入第1幾个字元的文件名稱,以鏈結至現有的文件。',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': '倉庫/網站是一个實体位置的地址与GIS資料位置的項目會儲存。 它可以是建置,一个特定區域中的城市或任何類似。',
'A brief description of the group (optional)': '群組的簡要說明(選用)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': '從全球定位系統下載的文件包含了一系列XML格式的地理點。',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': '在來自全球定位系統GPX格式的文件,其時間戳可與照片的時間戳關聯以在地圖上找到它們。',
'A library of digital resources, such as Photos, signed contracts and Office documents.': '一个庫的數位資源,如照片,已簽署的合約和Office文件。',
'A library of digital resources, such as photos, documents and reports': '一个庫的數位資源,如照片,文檔和報告',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': '位置群組可用來定义的范圍的受影响的區域,如果它未落在一个管理區域。',
'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': '位置群組是一組的位置(通常是一个管理區域表示結合"地區")。 成員位置會新增至位置群組在這裡。 位置群組可能用來過濾顯示的內容在地圖上和在搜尋結果中只能實体所涵蓋的位置群組。 位置群組可用來定义的范圍的受影响的區域,如果它未落在一个管理區域。 位置群組可用于區域的功能表。',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': '位置群組是一組的位置(通常是一个管理區域表示結合"地區")。',
'A location group must have at least one member.': '位置群組必须至少有一个成員。',
'A place within a Site like a Shelf, room, bin number etc.': '一个位置網站內的類似層板,房間,貯存箱號碼等等。',
'A practical example can be of a report of lost person. Now if one machine register him to be found on 16th August and another machine registers him to found on 17th August, then e.g. Newer timestamp will replace data entry of your machine with that of foriegn machine because that is newer one.': '一个實用的范例可以是一个報告的遺失人員。 現在如果一台機器註冊該上找到第16 8月及另一部機器登錄該使用者上找第17 8月,例如,然后新的時間戳記會取代資料項目的機器的外部機器的原因是新的。',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'Snapshot的bin或其他文件包含有關的增補資訊可以上传這裡。',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': '一个Snapshot的位置或其他文件包含有關的增補資訊位置可以上传在這裡。',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': '一个Snapshot的位置或其他文件包含有關的增補資訊的網站可以上传在這裡。',
'A survey series with id %s does not exist. Please go back and create one.': '一个調查系列ID為%s不存在。 請回上頁,并建立一个。',
'ABOUT THIS MODULE': '關於此模組',
'ABOUT': '關於',
'ACCESS DATA': '存取資料',
'ANY': '任何',
'API is documented here': 'API是記載在這裡',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20快速評估修改新西蘭',
'Abbreviation': '縮寫',
'Ability to Fill Out Surveys': '能够填寫調查',
'Ability to customize the list of details tracked at a Shelter': '能够自訂清單的詳細追蹤, Shelter',
'Ability to customize the list of human resource tracked at a Shelter': '能够自訂清單的人力資源上追蹤一个Shelter',
'Ability to customize the list of important facilities needed at a Shelter': '能够自訂清單的重要設備需要在一个Shelter',
'Ability to track partial fulfillment of the request': '能够追蹤部分履行的要求',
'Ability to view Results of Completed and/or partially filled out Surveys': '可用來檢視結果的完成和/或部分填寫調查',
'About Sahana Eden': '關于Sahana Eden',
'About Sahana': '關于Sahana',
'About this module': '關於此模組',
'About': '關於',
'Access denied': '拒絕存取',
'Access to Shelter': '若要存取Shelter',
'Access to education services': '若要存取教育服務',
'Accessibility of Affected Location': '协助工具的受影响的位置',
'Account Registered - Please Check Your Email': '帳戶已註冊-請檢查您的電子郵件',
'Account registered, however registration is still pending approval - please wait until confirmation received.': '账户登錄,但是登錄仍在擱置核准-請稍候直到收到確認。',
'Acronym': '字首語',
'Actionable by all targeted recipients': '所有可執行目標收件者',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': '僅可由指定的練習"参与者"練習應該出現在ID<note>',
'Actionable': '可行',
'Actioned?': '大通?',
'Actions taken as a result of this request.': '採取的動作的結果,這个要求。',
'Actions': '動作',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': '從方案模板激活活動以分配適當的資源(人力,資產及設施)。',
'Active Problems': '作用中問題',
'Active': '作用中',
'Activities matching Assessments:': '相符的活動評量:',
'Activities of boys 13-17yrs before disaster': '活動的男女13-17yrs前災難',
'Activities of boys 13-17yrs now': '活動的男女13-17yrs現在',
'Activities of boys <12yrs before disaster': '活動的男孩<12yrs之前災難',
'Activities of boys <12yrs now': '活動的<12yrs現在男孩',
'Activities of children': '活動的子項',
'Activities of girls 13-17yrs before disaster': '活動的女孩13 17yrs之前災難',
'Activities of girls 13-17yrs now': '活動的女孩13 17yrs現在',
'Activities of girls <12yrs before disaster': '活動的女孩<12yrs之前災難',
'Activities of girls <12yrs now': '活動的女孩<12yrs現在',
'Activities': '活動',
'Activities:': '活動:',
'Activity Added': '新增活動',
'Activity Deleted': '刪除活動',
'Activity Details': '活動明細',
'Activity Report': '活動報告',
'Activity Reports': '活動報告',
'Activity Type': '活動類型',
'Activity Updated': '更新活動',
'Activity': '活動',
'Add Activity Type': '新增活動類型',
'Add Address': '新增地址',
'Add Aid Request': '新增輔助請求',
'Add Alternative Item': '新增替代項目',
'Add Assessment Summary': '新增評量摘要',
'Add Assessment': '新增評量',
'Add Asset Log Entry - Change Label': '新增資產日誌項目-變更標籤',
'Add Availability': '新增可用性',
'Add Baseline Type': '新增基準线類型',
'Add Baseline': '新增基準线',
'Add Bin Type': '新增bin類型',
'Add Bins': '新增圖表匣',
'Add Bundle': '新增軟體組',
'Add Camp Service': 'Camp新增服務',
'Add Camp Type': 'Camp新增類型',
'Add Camp': '新增Camp',
'Add Catalog.': '新增型錄。',
'Add Category': '新增種類',
'Add Category<>Sub-Category<>Catalog Relation': '新增Category<>Sub-Category<>Catalog關系',
'Add Certification': '新增認證',
'Add Competency': '新增能力',
'Add Config': '新增配置',
'Add Contact': '新增聯絡人',
'Add Contact Information': '新增聯絡資訊',
'Add Course Certicate': '新增進程凭證',
'Add Credential': '新增認證',
'Add Credentials': '新增認證',
'Add Disaster Victims': '新增災難受害者',
'Add Distribution': '新增配送',
'Add Donor': '新增Donor',
'Add Flood Report': '新增水災報告',
'Add GIS Feature': '新增GIS功能',
'Add Group Member': '新增群組成員',
'Add Human Resource': '新增人力資源',
'Add Identity': '新增身分',
'Add Image': '新增影像',
'Add Impact Type': '新增影响類型',
'Add Impact': '新增影响',
'Add Inventory Item': '新增庫存項目',
'Add Inventory Location': '新增庫存位置',
'Add Inventory Store': '新增資產儲存庫',
'Add Item (s)': '新增項目(S)',
'Add Item Catalog Category': '新增項目型錄種類',
'Add Item Catalog': '新增項目型錄',
'Add Item Sub-Category': '添加子類別',
'Add Item to Catalog': '新增項目到型錄',
'Add Item to Commitment': '新增項目至承諾',
'Add Item to Inventory': '新增項目至庫存',
'Add Item to Request': '新增項目至要求',
'Add Item to Shipment': '新增項目至出貨',
'Add Item': '新增項目',
'Add Job Role': '新增工作角色',
'Add Key': '新增金鑰',
'Add Kit': '新增套件',
'Add Landmark': '新增里程碑',
'Add Level 1 Assessment': '新增層次一評量',
'Add Level 2 Assessment': '新增層次二評量',
'Add Line': '新增一行',
'Add Locations': '新增位置',
'Add Log Entry': '新增日誌項目',
'Add Member': '新增成員',
'Add Membership': '新增成員資格',
'Add Message': '新增訊息',
'Add Metadata': '新增元數據',
'Add Mission': '新增任務',
'Add Need Type': '新增需要類型',
'Add Need': '新增需要',
'Add New Aid Request': '新增輔助請求',
'Add New Assessment Summary': '新增評量摘要',
'Add New Baseline Type': '新增基準线類型',
'Add New Baseline': '新增基準線',
'Add New Bin Type': '新增bin類型',
'Add New Bin': '新增新貯存箱',
'Add New Budget': '新增新預算',
'Add New Bundle': '新增軟体組',
'Add New Camp Service': '新增Camp服務',
'Add New Camp Type': '新增Camp類型',
'Add New Camp': '新增Camp',
'Add New Cluster Subsector': '新增叢集Subsector',
'Add New Cluster': '新增叢集',
'Add New Commitment Item': '新增承諾書項目',
'Add New Config': '新增配置',
'Add New Distribution Item': '新增分配項目',
'Add New Distribution': '新增分配',
'Add New Document': '新增文件',
'Add New Donor': '新增Donor',
'Add New Entry': '新增項目',
'Add New Event': '新增事件',
'Add New Flood Report': '新增水災報告',
'Add New Human Resource': '新增人力資源',
'Add New Image': '新增影像',
'Add New Impact Type': '新增影响類型',
'Add New Impact': '新增新影响',
'Add New Inventory Item': '新增庫存項目',
'Add New Inventory Location': '新增庫存位置',
'Add New Inventory Store': '新增至資產儲存庫',
'Add New Item Catalog Category': '新增項目型錄種類',
'Add New Item Catalog': '新增項目型錄',
'Add New Item Sub-Category': '新增項目子類別',
'Add New Item to Kit': '新增項目至套件',
'Add New Key': '新增金鑰',
'Add New Landmark': '新增里程碑',
'Add New Level 1 Assessment': '新增層次一評量',
'Add New Level 2 Assessment': '新增層次二評量',
'Add New Member': '新增成員',
'Add New Membership': '新增組員',
'Add New Metadata': '新增meta資料',
'Add New Need Type': '新增需要類型',
'Add New Need': '新增需要',
'Add New Note': '新增附註',
'Add New Partner': '新增夥伴',
'Add New Patient': '新增病人',
'Add New Peer': '新增同層級',
'Add New Population Statistic': '新增人口统計資料',
'Add New Position': '新增位置',
'Add New Problem': '新增問題',
'Add New Rapid Assessment': '新增快速評量',
'Add New Received Item': '新增接收項目',
'Add New Record': '新增記錄',
'Add New Request Item': '新增要求項目',
'Add New Request': '新增要求',
'Add New Response': '新增回應',
'Add New River': '新增金水河',
'Add New Role to User': '新增角色至使用者',
'Add New Scenario': '新增實務',
'Add New School District': '新增學校特區',
'Add New School Report': '新增學校報告',
'Add New Section': '新增區段',
'Add New Sent Item': '新增传送的項目',
'Add New Setting': '新增設定',
'Add New Shipment to Send': '新增出貨以传送',
'Add New Site': '新增網站',
'Add New Solution': '新增解决方案',
'Add New Source': '新增來源',
'Add New Staff Type': '新增工作人員類型',
'Add New Staff': '新增人員',
'Add New Storage Location': '新增儲存位置',
'Add New Subsector': '新增Subsector',
'Add New Survey Answer': '新增問卷調查回答',
'Add New Survey Question': '新增問卷調查問題',
'Add New Survey Section': '新增問卷調查部分',
'Add New Survey Series': '新增問卷調查系列',
'Add New Survey Template': '新增調查范本',
'Add New Team': '新增團隊',
'Add New Ticket': '新增問題單',
'Add New Track': '新增追蹤',
'Add New Unit': '新增單位',
'Add New Update': '新增更新',
'Add New User to Role': '新增使用者至角色',
'Add New': '新增',
'Add Note': '新增附註',
'Add Partner': '新增夥伴',
'Add Peer': '新增同層級',
'Add Person': '新增人員',
'Add Photo': '新增照片',
'Add Point': '新增點',
'Add Polygon': '新增多邊形',
'Add Population Statistic': '新增人口统計資料',
'Add Position': '新增位置',
'Add Problem': '新增問題',
'Add Projections': '新增估算',
'Add Question': '新增問題',
'Add Rapid Assessment': '新增快速評量',
'Add Recipient Site': '新增收件者網站',
'Add Recipient': '新增接收者',
'Add Record': '新增記錄',
'Add Recovery Report': '新增回复報告',
'Add Reference Document': '新增参照文件',
'Add Relief Item': '新增浮雕項目',
'Add Report': '新增報告',
'Add Request Detail': '新增要求詳細資料',
'Add Request Item': '新增要求項目',
'Add Request': '新增要求',
'Add Response': '新增回應',
'Add School District': '新增學校特區',
'Add School Report': '新增學校報告',
'Add Section': '新增區段',
'Add Sender Organization': '新增寄件者組織',
'Add Sender Site': '新增寄件者網站',
'Add Setting': '新增設定',
'Add Shipment Transit Log': '新增出貨传輸日誌',
'Add Shipment/Way Bills': '新增出貨/方式账單',
'Add Site': '新增站台',
'Add Skill Equivalence': '新增等值技能',
'Add Skill Provision': '新增供應技能',
'Add Skill Types': '技能新增類型',
'Add Solution': '新增解決方案',
'Add Source': '新增來源',
'Add Staff Type': '新增人員類型',
'Add Staff': '新增人員',
'Add Storage Bin Type': '新增儲存体bin類型',
'Add Storage Bin': '新增儲存体bin',
'Add Storage Location': '新增儲存體位置',
'Add Sub-Category': '新增子種類',
'Add Subscription': '新增訂閱',
'Add Subsector': '新增Subsector',
'Add Survey Answer': '新增調查回答',
'Add Survey Question': '新增調查問題',
'Add Survey Section': '新增調查區段',
'Add Survey Series': '新增調查系列',
'Add Survey Template': '新增調查范本',
'Add Team Member': '新增成員',
'Add Team': '新增團隊',
'Add Ticket': '新增問題單',
'Add Training': '新增訓練',
'Add Unit': '新增單位',
'Add Update': '新增更新',
'Add Volunteer Availability': '新增自愿可用性',
'Add Volunteer Registration': '新增自愿登錄',
'Add a New Inventory Location': '新增一个新庫存位置',
'Add a New Relief Item': '新增一个新的項目',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': '新增一个参考文件,如:檔案, URL或聯絡人,以驗證這項資料。 如果您不輸入一个参照文件,您的電子郵件將不顯示。',
'Add a Reference Document such as a file, URL or contact person to verify this data.': '新增一个参考文件,如:檔案, URL或聯絡人,以驗證這項資料。',
'Add a Volunteer': '新增一个主動',
'Add a new Relief Item.': '新增一个新的項目。',
'Add a new Site from where the Item is being sent.': '新增一个新的站點的項目被送出。',
'Add a new Site where the Item is being sent to.': '新增一个新的項目的场所传送。',
'Add a new certificate to the catalog.': '於目錄添加新的證書。',
'Add a new competency rating to the catalog.': '新增一个新的能力的分級目錄。',
'Add a new course to the catalog.': '新增一个新的進程至型錄。',
'Add a new job role to the catalog.': '新增一个新的工作角色至型錄。',
'Add a new skill provision to the catalog.': '新增一个新技術供應至型錄。',
'Add a new skill to the catalog.': '新增一个新技術的型錄。',
'Add a new skill type to the catalog.': '新增一个新的技術類型至型錄。',
'Add an Photo.': '新增一个照片。',
'Add main Item Category.': '新增主要項目種類。',
'Add main Item Sub-Category.': '新增主要項目子類別。',
'Add new Group': '新增群組',
'Add new Individual': '新增个別',
'Add new person.': '新增人員。',
'Add new position.': '新增位置。',
'Add new project.': '新增專案。',
'Add new staff role.': '新增工作人員角色。',
'Add new staff.': '新增人員。',
'Add or Update': '新增或更新',
'Add staff members': '新增人員成員',
'Add the Storage Bin Type.': '新增儲存体bin類型。',
'Add the Storage Location where this bin is located.': '新增儲存体位置這位的位置。',
'Add the Storage Location where this this Bin belongs to.': '新增儲存体位置這个紙匣所屬。',
'Add the main Warehouse/Site information where this Bin belongs to.': '新增主要倉儲/站台資訊在此Bin屬于。',
'Add the main Warehouse/Site information where this Item is to be added.': '新增主要倉儲/站台資訊在這个項目是要新增。',
'Add the main Warehouse/Site information where this Storage location is.': '新增主要倉儲/資訊為的儲存体位置。',
'Add the unit of measure if it doesnt exists already.': '新增測量單位如果不存在。',
'Add to Bundle': '新增至軟體組',
'Add to Catalog': '新增至型錄',
'Add to budget': '新增至預算',
'Add volunteers': '新增志愿者',
'Add': '新增',
'Add/Edit/Remove Layers': '新增/編輯/移除層',
'Added to Group': '組員已新增',
'Added to Team': '組員已新增',
'Additional Beds / 24hrs': '其他Beds / 24hrs',
'Additional Comments': '其他註解',
'Additional quantity quantifier – i.e. “4x5”.': '其他數量限量元-也就是"4x5"。',
'Address Details': '位址詳細資料',
'Address Type': '位址類型',
'Address added': '新增位址',
'Address deleted': '刪除地址',
'Address updated': '更新地址',
'Address': '地址',
'Addresses': '地址',
'Adequate food and water available': '足够的食物和水可用',
'Adequate': '足够',
'Adjust Item(s) Quantity': '調整項目(s)的數量',
'Adjust Items due to Theft/Loss': '調整項目由于遭竊/遺失',
'Admin Email': '管理電子郵件',
'Admin Name': 'Admin 名稱',
'Admin Tel': '管理TEL',
'Admin': '管理權',
'Administration': '管理模組',
'Administrator': '管理者',
'Adolescent (12-20)': '青少年 (13-17)',
'Adolescent participating in coping activities': 'Adolescent参与复制活動',
'Adult (21-50)': '成人 (16-64)',
'Adult ICU': '成人ICU',
'Adult Psychiatric': '成人Psychiatric',
'Adult female': '成人女性',
'Adult male': '成人男性',
'Adults in prisons': 'prisons中的成人',
'Advanced Bin Search': '進階搜尋bin',
'Advanced Catalog Search': '進階搜尋型錄',
'Advanced Category Search': '進階搜尋種類',
'Advanced Item Search': '進階搜尋項目',
'Advanced Location Search': '進階搜尋位置',
'Advanced Site Search': '進階網站搜尋',
'Advanced Sub-Category Search': '先進的子分類搜索',
'Advanced Unit Search': '進階單位搜索',
'Advanced:': '進階:',
'Advisory': '諮詢',
'Affectees Families settled in the school belong to district': '受影響家庭定居在區內學校',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': '之后,按一下按鈕時,一个組成對的項目會顯示一个。 請選取一个解决方案中每一對您喜好的"其他"。',
'Age Group': '年齡層',
'Age group does not match actual age.': '群組不符合實際經歷時間。',
'Age group': '年齡層',
'Aggravating factors': 'Aggravating因素',
'Aggregate Items': '聚集項目',
'Agriculture': '農業',
'Aid Request Details': '輔助要求詳細資料',
'Aid Request added': '輔助請求添加',
'Aid Request deleted': '輔助刪除要求',
'Aid Request updated': '要求更新輔助',
'Aid Request': '輔助請求',
'Aid Requests': '輔助要求',
'Air Transport Service': '空氣传輸服務',
'Aircraft Crash': '墜機',
'Aircraft Hijacking': '飛機强制存取',
'Airport Closure': '機场關閉',
'Airport': '機場',
'Airspace Closure': 'Airspace關閉',
'Alcohol': '酒精',
'Alert': '警示',
'All Inbound & Outbound Messages are stored here': '所有入埠及出埠訊息儲存在這裡',
'All Locations': '所有位置',
'All Pledges': '所有抵押',
'All Requested Items': '所有要求的項目',
'All Resources': '所有資源',
'All data is able to be shared with other sites in real time.': '所有資料可以共用其他站台的實際時間。',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': '所有資料所提供的Sahana Software Foundation從這个網站授權下的創意Commons聲明授權。 然而,并非所有資料產生在這裡。 請参閱來源欄位的每一个項目。',
'All': '所有',
'Allowed to push': '允許推送',
'Allows a Budget to be drawn up': '容許預算要繪制設置',
'Allows authorized users to control which layers are available to the situation map.': '可讓授權使用者來控制層可用的狀湟對映。',
'Allows authorized users to upload multiple features into the situation map.': '容許授權的使用者上传多个特性的狀湟對映。',
'Alternative Item Details': '替代項目詳細資料',
'Alternative Item added': '新增替代項目',
'Alternative Item deleted': '替代項目刪除',
'Alternative Item updated': '替代更新項目',
'Alternative Item': '替代項目',
'Alternative Items': '替代項目',
'Alternative infant nutrition in use': '替代嬰兒營養使用中',
'Alternative places for studying available': '替代的工作區研究可用',
'Alternative places for studying': '替代工作區的研究',
'Ambulance Service': '救護車服務',
'An Inventory Store is a physical place which contains Relief Items available to be Distributed.': '一个資產儲存庫是一个實体位置包含的項目可用的。',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': '一个進氣區系统,倉儲管理系统,商品追蹤,供應鏈管理,採購,及其他資產和資源管理功能。',
'An interactive map of the situation.': '互動式對映的狀湟。',
'An item which can be used in place of another item': '一个項目可用于代替另一个項目',
'Analysis of Completed Surveys': '分析完成的調查',
'Animal Die Off': '動物骰子關閉',
'Animal Feed': '動物饋送',
'Animals': '動物',
'Answer Choices (One Per Line)': '答案選項(每行一)',
'Antibiotics available': 'Antibiotics可用',
'Antibiotics needed per 24h': 'Antibiotics需要每小時',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': '任何可用的meta資料中,將檔案自動讀取,例如時間戳記,作者,緯度和經度。',
'Any comments about this sync partner.': '任何相關註解這个同步伙伴。',
'Apparent Age': '明顯經歷時間',
'Apparent Gender': '明顯性別',
'Application Deadline': '應用程式截止時間',
'Appropriate clothing available': '適當的衣服可用',
'Appropriate cooking equipment/materials in HH': '烹飪適當設備/材料hh',
'Approve': '核准',
'Approved': '已核准',
'Approver': '核准者',
'Approx. number of cases/48h': '大約 號碼的案例/小時',
'Approximately how many children under 5 with diarrhea in the past 48 hours?': '大約有多少下的五diarrhea在過去48小時內?',
'Archive not Delete': '無法刪除保存',
'Arctic Outflow': '北極串流',
'Are basic medical supplies available for health services since the disaster?': '災後基本的醫療用品是否可用於衛生服務?',
'Are breast milk substitutes being used here since the disaster?': '在災難發生後母乳代用品是否被用在這裡?',
'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': '兒童,老人,和殘疾人士每天生活,嬉戲和走過的地區是否實際安全?',
'Are the chronically ill receiving sufficient care and assistance?': '長期病患者是否得到足夠的關心和幫助?',
'Are there adults living in prisons in this area?': '有成人活中prisons在此區域嗎?',
'Are there alternative places for studying?': '有替代工作區的研究嗎?',
'Are there cases of diarrhea among children under the age of 5?': '有diarrhea之間的情湟五歲以下兒童嗎?',
'Are there children living in adult prisons in this area?': '有子項活中有prisons在此區域嗎?',
'Are there children living in boarding schools in this area?': '有子項活在學校登機前在此區域嗎?',
'Are there children living in homes for disabled children in this area?': '有子項活在住家中的停用中這个區域嗎?',
'Are there children living in juvenile detention in this area?': '有子項活中青少年detention在此區域嗎?',
'Are there children living in orphanages in this area?': '有子項活中orphanages在此區域嗎?',
'Are there children with chronical illnesses in your community?': '有子項,含chronical疾病的社群嗎?',
'Are there health services functioning for the community since the disaster?': '有狀態服務運作的社群,因為災難?',
'Are there older people living in care homes in this area?': '有舊的人員使用者在管理Home在此區域嗎?',
'Are there older people with chronical illnesses in your community?': '有舊的人chronical疾病的社群嗎?',
'Are there people with chronical illnesses in your community?': '有人chronical疾病的社群嗎?',
'Are there separate latrines for women and men available?': '有个別latrines的男人或婦女,老人可用嗎?',
'Are there staff present and caring for the residents in these institutions?': '有人員存在与維護的居民在這些機构嗎?',
'Area': '區域 (area)',
'Areas inspected': '已視察地區',
'Assessment Details': '評量詳細資料',
'Assessment Reported': '評量報告',
'Assessment Summaries': '評量摘要',
'Assessment Summary Details': '評量摘要詳細資料',
'Assessment Summary added': '新增評量摘要',
'Assessment Summary deleted': '刪除評量摘要',
'Assessment Summary updated': '評量摘要更新',
'Assessment Type': '評量類型',
'Assessment Type:': '評量類型:',
'Assessment added': '新增評量',
'Assessment admin level': '評量管理層次',
'Assessment deleted': '評量刪除',
'Assessment timeline': '評量時間表',
'Assessment updated': '評量更新',
'Assessment': '評量',
'Assessments Needs vs. Activities': '需要評估与活動',
'Assessments and Activities': '評量及活動',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': '評估是結构化報告來完成專業組織的資料包括WFP評量',
'Assessments are structured reports done by Professional Organizations': '評估是結构化報告來完成專業組織',
'Assessments': '評量',
'Assessments:': '評量:',
'Assessor': '評量者',
'Asset Assigned': '指派資產',
'Asset Assignment Details': '資產分派明細',
'Asset Assignments deleted': '資產分派刪除',
'Asset Assignments updated': '資產更新工作分派',
'Asset Assignments': '資產分派',
'Asset Details': '資產明細',
'Asset Log Details': '資產詳細資料日誌',
'Asset Log Empty': '資產空日誌',
'Asset Log Entry Added - Change Label': '資產日誌項目新增-變更標籤',
'Asset Log Entry deleted': '資產日誌項目刪除',
'Asset Log Entry updated': '資產日誌項目更新',
'Asset Log': '資產日誌',
'Asset Management': '資產管理',
'Asset Number': '資產編號',
'Asset added': '已新增資產',
'Asset deleted': '已刪除資產',
'Asset removed': '移除資產',
'Asset updated': '已更新資產',
'Asset': '資產',
'Assets are resources which are not consumable but are expected back, so they need tracking.': '資產是資源不易損耗部件,但是預期返回,所以他們所需要的追蹤。',
'Assets': '資產',
'Assign Asset': '指派資產',
'Assign Group': '指派給群組',
'Assign Staff': '指派人員',
'Assign Storage Location': '指定存儲位置',
'Assign to Org.': '指派給組織。',
'Assign to Organization': '指派給組織',
'Assign to Person': '指派給人員',
'Assign to Site': '指派給網站',
'Assign': '指派',
'Assigned By': '由指派',
'Assigned To': '指派給',
'Assigned to Organization': '指派給組織',
'Assigned to Person': '指派給人員',
'Assigned to Site': '指派給網站',
'Assigned to': '指派給',
'Assigned': '已指派',
'Assignments': '指派',
'Assistance for immediate repair/reconstruction of houses': '协助立即的修复/的重新安置',
'Assistant': '助理',
'Assisted Family Care': '輔助管理系列',
'Assisted Self-care': '輔助自我管理',
'At/Visited Location (not virtual)': '在/瀏览位置(非虛擬)',
'Attend to information sources as described in <instruction>': '参加"以資訊來源中所述<instruction>',
'Attribution': '賦值',
'Audit Read': '審核讀取',
'Audit Write': '寫入審核',
'Authentication failed!': '鉴別失敗!',
'Authentication information of foreign server.': '鉴別資訊的外來伺服器。',
'Author': '作者',
'Author:': '作者:',
'Automatic Database Synchronization History': '自動同步化歷程資料庫',
'Automotive': '汽車',
'Availability': '可用性',
'Available Alternative Inventories': '可用的替代庫存',
'Available Beds': '可用Beds',
'Available Inventories': '可用的庫存',
'Available Messages': '可用的訊息',
'Available Records': '可用的記錄',
'Available databases and tables': '可用的資料庫及表格',
'Available for Location': '可用的位置',
'Available from': '可用開始時間',
'Available in Viewer?': '可用的檢視器中?',
'Available until': '截止有效期',
'Availablity': '可用性',
'Avoid the subject event as per the <instruction>': '避免在主旨事件作為每个<instruction>',
'Babies who are not being breastfed, what are they being fed on?': '不接受母乳喂養的嬰兒吃什么?',
'Baby And Child Care': '嬰兒及幼兒護理',
'Background Colour for Text blocks': '文字區塊的背景顏色',
'Background Colour': '背景顏色',
'Baldness': '禿頭',
'Banana': '香蕉',
'Bank/micro finance': '銀行/MICRO財務',
'Barricades are needed': 'Barricades需要',
'Base Layer?': '基本層?',
'Base Layers': '基本層',
'Base Location': '基本位置',
'Base Site Set': '基本網站設定',
'Base Unit': '基本裝置',
'Baseline Data': '基準线資料',
'Baseline Number of Beds': '基線床位數',
'Baseline Type Details': '基準线類型詳細資料',
'Baseline Type added': '新增基準线類型',
'Baseline Type deleted': '刪除基準线類型',
'Baseline Type updated': '更新基準线類型',
'Baseline Type': '基準線類型',
'Baseline Types': '基準线類型',
'Baseline added': '新增基準线',
'Baseline deleted': '刪除基準线',
'Baseline number of beds of that type in this unit.': '在本單位這種類型病床的基線數目。',
'Baseline updated': '更新基準线',
'Baselines Details': '基準线詳細資料',
'Baselines': '基準線',
'Basic Assessment Reported': '基本評量報告',
'Basic Assessment': '基本評量',
'Basic Details': '基本詳細資料',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': '基本資訊的要求和捐款,如:種類,裝置,請聯絡詳細資料和狀態。',
'Basic medical supplies available prior to disaster': '基本醫療用品可用之前災難',
'Basic medical supplies available since disaster': '醫療用品基本后提供災難',
'Basic reports on the Shelter and drill-down by region': '基本報告在Shelter和往下探查"區域',
'Baud rate to use for your modem - The default is safe for most cases': '传輸速率,以用于您的數据機的預設值是安全的大部分情湟',
'Baud': '傳輸速率',
'Beacon Service URL': '引標服務URL',
'Beam': '光束',
'Bed Capacity per Unit': '每單位床容量',
'Bed Capacity': '床容量',
'Bed Type': '床型',
'Bed type already registered': '床型已註冊',
'Bedding materials available': '床上用品材料可用',
'Below ground level': '地面以下',
'Beneficiary Type': '受益人類型',
'Biological Hazard': '生物危害',
'Blood Type (AB0)': '渾身類型(AB)',
'Blowing Snow': '沒有吹向他人雪',
'Boat': '船班',
'Bodies found': '找到主体',
'Bodies recovered': '回复主体',
'Body Recovery Reports': '主体回复報告',
'Body Recovery Request': '回复要求主体',
'Body Recovery Requests': '回复要求主体',
'Body': '主體',
'Bomb Explosion': 'Bomb爆炸',
'Bomb Threat': 'Bomb威胁',
'Border Colour for Text blocks': '邊框顏色的文字區塊',
'Bounding Box Insets': '嵌入外框',
'Bounding Box Size': '外框框大小',
'Boys 13-18 yrs in affected area': '13男女-18年期中受影响的區域',
'Boys 13-18 yrs not attending school': '13男女-18年期不参加學校',
'Boys 6-12 yrs in affected area': '六男女-12年期中受影响的區域',
'Boys 6-12 yrs not attending school': '六男女-12年期不参加學校',
'Brand Details': '品牌詳細資料',
'Brand added': '品牌新增',
'Brand deleted': '品牌刪除',
'Brand updated': '品牌更新',
'Brand': '產品',
'Brands': '品牌',
'Breast milk substitutes in use since disaster': 'Breast替換espresso使用中,因為災難',
'Breast milk substitutes used prior to disaster': 'Breast替換espresso使用之前災難',
'Bricks': '磚',
'Bridge Closed': '關閉橋接器',
'Bridge': '橋接器',
'Bucket': '儲存器 (bucket)',
'Buddhist': '佛教徒',
'Budget Details': '預算明細',
'Budget Updated': '更新預算',
'Budget added': '新增預算',
'Budget deleted': '刪除預算',
'Budget updated': '更新預算',
'Budget': '預算',
'Budgeting Module': '預算模組',
'Budgets': '預算',
'Buffer': 'buffer',
'Bug': '錯誤',
'Building Aide': 'AIDE建置',
'Building Assessments': '建置評量',
'Building Collapsed': '建置收合',
'Building Name': '大樓名稱',
'Building Safety Assessments': '建置安全評量',
'Building Short Name/Business Name': '建置簡短名稱/商業名稱',
'Building or storey leaning': '建置或storey leaning',
'Built using the Template agreed by a group of NGOs working together as the': '使用內建的范本所認可群組的迫切合作的',
'Bulk Uploader': '大量Multi File Uploader',
'Bundle Contents': '銷售組合內容',
'Bundle Details': '軟體組詳細資料',
'Bundle Updated': '更新軟体組',
'Bundle added': '新增軟体組',
'Bundle deleted': '刪除組',
'Bundle updated': '更新軟体組',
'Bundle': '組合 (bundle)',
'Bundles': '軟體組',
'Burn ICU': 'ICU燒錄',
'Burn': '燒錄',
'Burned/charred': '燒錄/charred',
'Business damaged': '商業損壞',
'By Facility': '由機能',
'By Inventory': '由庫存',
'By Site': '依網站',
'By Warehouse': '由倉儲',
'CBA Women': 'CBA婦女',
'CSS file %s not writable - unable to apply theme!': 'CSS檔%無法寫入-無法套用布景主題!',
'Calculate': '計算',
'Camp Coordination/Management': 'Camp协調/管理',
'Camp Details': 'Camp詳細資料',
'Camp Service Details': 'Camp服務詳細資料',
'Camp Service added': 'Camp服務新增',
'Camp Service deleted': 'Camp服務刪除',
'Camp Service updated': 'Camp服務更新',
'Camp Service': 'Camp服務',
'Camp Services': 'Camp服務',
'Camp Type Details': 'Camp類型詳細資料',
'Camp Type added': 'Camp新增類型',
'Camp Type deleted': 'Camp刪除類型',
'Camp Type updated': 'Camp更新類型',
'Camp Type': 'Camp類型',
'Camp Types and Services': 'Camp類型和服務',
'Camp Types': 'Camp類型',
'Camp added': 'Camp新增',
'Camp deleted': 'Camp刪除',
'Camp updated': 'Camp更新',
'Can only disable 1 record at a time!': '只能停用一个記錄時間!',
'Can users register themselves for authenticated login access?': '使用者可以自行登錄的鉴別登入嗎?',
'Cancel Log Entry': '取消日誌項目',
'Cancel Shipment': '取消出貨',
'Cancel': '取消',
'Canceled': '已取消',
'Candidate Matches for Body %s': '候選相符的主体%',
'Canned Fish': '預錄fish',
'Cannot be empty': '不能是空的',
'Cannot delete whilst there are linked records. Please delete linked records first.': '無法刪除時有的記錄。 請刪除鏈結的第一个記錄。',
'Cannot disable your own account!': '無法停用您自己的账户!',
'Capacity (Max Persons)': '容量(最大人員)',
'Capacity (W x D X H)': '容量(寬x深x)',
'Capacity': '容量',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': '擷取資訊的意外受損群組(Tourists,乘客,系列,等等。 )',
'Capture Information on each disaster victim': '擷取資訊在每个意外受損',
'Capturing organizational information of a relief organization and all the projects they have in the region': '擷取組織資訊的浮雕屬組織与其所有的專案區域中它們',
'Capturing the essential services each Volunteer is providing and where': '在擷取基本服務每个主動提供和位置',
'Capturing the projects each organization is providing and where': '在擷取專案每个組織提供和位置',
'Care Report': '管理報告',
'Care Strategy': '管理策略',
'Cash available to restart business': '現金用于重新啟動"商業',
'Casual Labor': '訪客勞工',
'Casualties': '意外',
'Catalog Details': '型錄詳細資料',
'Catalog Item added': '型錄項目新增',
'Catalog Item deleted': '型錄項目已刪除',
'Catalog Item updated': '型錄項目更新',
'Catalog Item': '型錄項目',
'Catalog Items': '型錄商品項目',
'Catalog Name': '型錄名稱',
'Catalog added': '新增型錄',
'Catalog deleted': '已刪除型錄',
'Catalog updated': '型錄更新',
'Catalog': '型錄 (catalog)',
'Catalogs': '型錄',
'Categories': '種類',
'Category': '類別',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog新增關系',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog關系刪除',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog關系更新',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog關系',
'Ceilings, light fixtures': '天花板,燈退回',
'Central point to record details on People': '中心點來記錄詳細資料的人員',
'Certificate Catalog': '凭證型錄',
'Certificate Details': '憑證明細',
'Certificate Status': '憑證狀態',
'Certificate added': '添加凭證',
'Certificate deleted': '已刪除憑證',
'Certificate updated': '已更新憑證',
'Certificate': '凭證',
'Certificates': '憑證',
'Certification Details': '認證詳細資料',
'Certification added': '新增認證',
'Certification deleted': '刪除認證',
'Certification updated': '更新認證',
'Certification': '認證',
'Certifications': '認證',
'Certifying Organization': '組織認證',
'Change Password': '變更密碼',
'Check Request': '檢查要求',
'Check for errors in the URL, maybe the address was mistyped.': '檢查錯誤中的URL,可能的地址是輸入錯誤。',
'Check if the URL is pointing to a directory instead of a webpage.': '請檢查URL是否指向一个目錄,而一个網頁。',
'Check outbox for the message status': '檢查寄件匣的訊息狀態',
'Check to delete': '勾選以刪除',
'Check to delete:': '勾選以刪除:',
'Check': '檢查',
'Check-in': '移入',
'Check-out': '退房',
'Checklist created': '已建立核對清單',
'Checklist deleted': '刪除清單',
'Checklist of Operations': '作業核對清單',
'Checklist updated': '更新清單',
'Checklist': '核對清單',
'Chemical Hazard': '化學危害',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': '化學,生物, Radiological,核能或高產生爆炸的威胁或攻擊',
'Chicken': '雞肉',
'Child (2-11)': '兒童 (2-12)',
'Child (< 18 yrs)': '子項(< 18年期)',
'Child Abduction Emergency': '子項Abduction緊急',
'Child headed households (<18 yrs)': '子項頭住家(<18年期)',
'Child': '子項',
'Children (2-5 years)': '小孩(二到五歲)',
'Children (5-15 years)': '小孩(五到十五歲)',
'Children (< 2 years)': '小孩(不到兩歲)',
'Children in adult prisons': '小孩關在成人的監牢裏',
'Children in boarding schools': '小孩在住宿學校裏',
'Children in homes for disabled children': '子項在住家中的停用子項',
'Children in juvenile detention': '小孩在青少年監獄裏',
'Children in orphanages': '子項中orphanages',
'Children living on their own (without adults)': '子項使用者在自己的(不含成人)',
'Children not enrolled in new school': '子項不登記新學校',
'Children orphaned by the disaster': '子項遺留的災難',
'Children separated from their parents/caregivers': '子項分開母項/caregivers',
'Children that have been sent to safe places': '子項已传送到安全位置',
'Children who have disappeared since the disaster': '子項擁有消失,因為災難',
'Children with chronical illnesses': '与子項chronical疾病',
'Chinese (Taiwan)': '中文(台灣)',
'Chinese': '中文',
'Cholera Treatment Capability': 'Cholera處理功能',
'Cholera Treatment Center': 'Cholera處理中心',
'Cholera Treatment': 'Cholera處理',
'Cholera-Treatment-Center': 'Cholera-處理"-"置中"',
'Choose Manually': '選擇手動',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': '根據新的評估和團隊判斷選擇新的張貼標示。影響整棟建築物的嚴重狀況是張貼「不安全」標示的依據。局部嚴重及整體中度的狀況可能需要「限制使用」標示。請在主要入口張貼「已檢查」標示,並在每個重要入口張貼所有其他標示。',
'Choose from one of the following options': '選擇下列其中一個選項',
'Choosing Skill and Resources of Volunteers': '選擇志工的技能與資源',
'Christian': '基督徒',
'Church': '教堂',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': '失蹤情形,以及最後見到失蹤者存活的其他受害者/目擊者。',
'City': '城市',
'Civil Emergency': '民事緊急',
'Clear Selection': '取消選擇',
'Click here to open log': '請按一下這裡來開啟日誌',
'Click on a Map': '按一下地圖',
'Click on an ID in the left-hand column to make a Pledge to match a request for aid.': '按一下左欄中的ID進行認捐,以配對援助要求。',
'Click on the link %(url)s to reset your password': '按一下鏈結 %(url)s 以重設您的密碼',
'Click on the link %(url)s to verify your email': '按一下鏈結 %(url)s 以驗證您的電子郵件',
'Client IP': '用戶端 IP',
'Clinical Laboratory': '臨床實驗室',
'Clinical Operations': '臨床作業',
'Clinical Status': '臨床狀態',
'Closed': '結案',
'Closure': '結束',
'Clothing': '衣服',
'Cluster Details': '叢集詳細資料',
'Cluster Distance': '叢集距離',
'Cluster Subsector Details': '叢集次部門詳細資料',
'Cluster Subsector added': '已新增叢集次部門',
'Cluster Subsector deleted': '已刪除叢集次部門',
'Cluster Subsector updated': '已更新叢集次部門',
'Cluster Subsector': '叢集次部門',
'Cluster Subsectors': '叢集次部門',
'Cluster Threshold': '叢集臨界值',
'Cluster added': '新增叢集',
'Cluster deleted': '刪除叢集',
'Cluster updated': '已更新叢集',
'Cluster': '叢集',
'Cluster(s)': '叢集',
'Clusters': '叢集',
'Code': '程式碼',
'Cold Wave': '寒流',
'Collapse, partial collapse, off foundation': '倒塌、局部倒塌、偏離地基',
'Collective center': '集體中心',
'Colour for Underline of Subheadings': '次標題底線的顏色',
'Colour of Buttons when hovering': '游標暫留時按鈕的顏色',
'Colour of bottom of Buttons when not pressed': '未按下時按鈕底部的顏色',
'Colour of bottom of Buttons when pressed': '按下時按鈕底部的顏色',
'Colour of dropdown menus': '下拉功能表的顏色',
'Colour of selected Input fields': '選取的輸入欄位的顏色',
'Colour of selected menu items': '選取的功能表項目的顏色',
'Column Choices (One Per Line': '直欄選項(每行一個',
'Columns, pilasters, corbels': '柱、壁柱、托架',
'Combined Method': '合併方法',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': '稍後回來。造訪此網站的每個人可能都遇到和您相同的問題。',
'Come back later.': '稍後回來。',
'Comments': '備註',
'Commercial/Offices': '商業/辦公室',
'Commit Date': '確定日期',
'Commit from %s': '確定從%s',
'Commit': '確定',
'Commit Status': '承諾狀態',
'Commiting a changed spreadsheet to the database': '正在將變更後的試算表確定至資料庫',
'Commitment Added': '新增的承諾書',
'Commitment Canceled': '取消承諾',
'Commitment Details': '承諾書細節',
'Commitment Item Details': '承諾項目細節',
'Commitment Item added': '新增承諾項目',
'Commitment Item deleted': '已刪除之承諾項目',
'Commitment Item updated': '承諾項目更新',
'Commitment Item': '承諾項目',
'Commitment Items': '承諾項目',
'Commitment Status': '承諾狀態',
'Commitment Updated': '更新承諾',
'Commitment': '承諾',
'Commitments': '承諾',
'Committed By': '確定由',
'Committed': '已確定',
'Committing Inventory': '確定庫存',
'Communication problems': '通訊問題',
'Community Centre': '社區中心',
'Community Health Center': '社區健康中心',
'Community Member': '社群成員',
'Competencies': '競爭力',
'Competency Details': '能力詳細資料',
'Competency Rating Catalog': '能力分級目錄',
'Competency Rating Details': '能力分級詳細資料',
'Competency Rating added': '已新增能力分級',
'Competency Rating deleted': '已刪除能力分級',
'Competency Rating updated': '已更新能力分級',
'Competency Ratings': '能力等級',
'Competency added': '新增能力',
'Competency deleted': '刪除能力',
'Competency updated': '更新能力',
'Competency': '能力',
'Complete Database Synchronized': '完成資料庫同步',
'Complete Unit Label for e.g. meter for m.': '完整的單位標籤,例如m代表meter(公尺)。',
'Complete': '完成',
'Completed': '已完成',
'Compose': '傳訊',
'Compromised': '受損',
'Concrete frame': '混凝土框架',
'Concrete shear wall': '混凝土剪力牆',
'Condition': '條件',
'Config added': '新增配置',
'Config deleted': '刪除配置',
'Config updated': '更新配置',
'Config': '配置',
'Configs': '配置',
'Configurations': '配置',
'Configure Run-time Settings': '配置執行時期設定',
'Confirm Shipment Received': '確認出貨接收',
'Confirmed Incidents': '確認事件',
'Confirmed': '已確認',
'Confirming Organization': '確認組織',
'Conflict Details': '衝突明細',
'Conflict Resolution': '衝突解決',
'Consignment Note': '託運單',
'Constraints Only': '僅限制',
'Consumable': '消耗品',
'Contact Data': '聯絡資料',
'Contact Details': '聯絡人詳細資料',
'Contact Info': '聯絡資訊',
'Contact Information Added': '新增聯絡資訊',
'Contact Information Deleted': '刪除聯絡資訊',
'Contact Information Updated': '更新聯絡資訊',
'Contact Information': '聯絡資訊',
'Contact Method': '聯絡方式',
'Contact Name': '聯絡人名稱',
'Contact Person': '聯絡人',
'Contact Phone': '聯絡電話',
'Contact details': '聯絡人詳細資料',
'Contact information added': '新增聯絡資訊',
'Contact information deleted': '刪除聯絡資訊',
'Contact information updated': '更新聯絡資訊',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '如有新消息或進一步問題時的聯絡人(若與報告人不同)。請盡可能提供電話號碼、地址和電子郵件。',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '如有新消息或進一步問題時的聯絡人(可多位,若與報告人不同)。請盡可能提供電話號碼、地址和電子郵件。',
'Contact us': '聯絡我們',
'Contact': '聯絡人',
'Contacts': '聯絡人',
'Contents': '目錄',
'Contradictory values!': '互相矛盾的值!',
'Contributor': '提供者',
'Conversion Tool': '轉換工具',
'Cooking NFIs': '烹飪用非食品物資(NFI)',
'Cooking Oil': '食用油',
'Coordinate Conversion': '座標轉換',
'Coping Activities': '應對活動',
'Copy any data from the one to be deleted into the one to keep': '將要刪除的記錄中的所有資料複製到要保留的記錄中',
'Copy': '複製',
'Corn': '玉米',
'Cost Type': '成本類型',
'Cost per Megabyte': '每MB成本',
'Cost per Minute': '每分鐘成本',
'Country of Residence': '居住國家',
'Country': '國家',
'County': '縣',
'Course Catalog': '課程型錄',
'Course Certicate Details': '課程證書詳細資料',
'Course Certicate added': '已新增課程證書',
'Course Certicate deleted': '已刪除課程證書',
'Course Certicate updated': '已更新課程證書',
'Course Certicates': '課程證書',
'Course Certificates': '課程證書',
'Course Details': '課程詳細資料',
'Course added': '課程新增',
'Course deleted': '已刪除課程',
'Course updated': '課程更新',
'Course': '課程',
'Courses': '課程',
'Create & manage Distribution groups to receive Alerts': '建立並管理發送通知的群組',
'Create Activity Report': '新增活動報告',
'Create Activity Type': '新增活動類型',
'Create Activity': '新增活動',
'Create Assessment': '新增評量',
'Create Asset': '新增資產',
'Create Bed Type': '新增床位類型',
'Create Brand': '新增品牌',
'Create Budget': '新增預算',
'Create Catalog Item': '新增型錄項目',
'Create Catalog': '新增型錄',
'Create Certificate': '新增憑證',
'Create Checklist': '建立核對清單',
'Create Cholera Treatment Capability Information': '新增霍亂治療能力資訊',
'Create Cluster Subsector': '新增叢集次部門',
'Create Cluster': '新增叢集',
'Create Competency Rating': '新增能力分級',
'Create Contact': '新增聯絡人',
'Create Course': '新增課程',
'Create Dead Body Report': '新增屍體報告',
'Create Event': '建立新的事件',
'Create Facility': '新增設施',
'Create Feature Layer': '新增功能層',
'Create Group Entry': '新增群組',
'Create Group': '新增群組',
'Create Hospital': '新增醫院',
'Create Identification Report': '新增識別報告',
'Create Impact Assessment': '建立影響評估',
'Create Import Job': '建立匯入工作',
'Create Incident Report': '新增事件報告',
'Create Incident': '新增事件',
'Create Item Category': '新增項目種類',
'Create Item Pack': '新增項目套件',
'Create Item': '新增項目',
'Create Kit': '新增套件',
'Create Layer': '新增層',
'Create Location': '新增位置',
'Create Map Profile': '新增地圖設定檔',
'Create Marker': '新增標記',
'Create Member': '新增成員',
'Create Mobile Impact Assessment': '建立行動式影響評估',
'Create Office': '新增辦公室',
'Create Organization': '新增組織',
'Create Personal Effects': '新增個人物品',
'Create Project': '新增專案',
'Create Projection': '新增投射',
'Create Rapid Assessment': '建立快速評量',
'Create Report': '新增新報告',
'Create Request': '建立要求',
'Create Resource': '新增資源',
'Create River': '新增河流',
'Create Role': '新增角色',
'Create Room': '新增房間',
'Create Scenario': '建立新情境',
'Create Sector': '新增行業',
'Create Service Profile': '新增服務設定檔',
'Create Shelter Service': '新增避難所服務',
'Create Shelter Type': '新增避難所類型',
'Create Shelter': '新增避難所',
'Create Skill Type': '新增技術類型',
'Create Skill': '新增技能',
'Create Staff Member': '新增人員',
'Create Status': '新增狀態',
'Create Task': '新增作業',
'Create Theme': '新增佈景主題',
'Create User': '新增使用者',
'Create Volunteer': '新增志工',
'Create Warehouse': '新增倉儲',
'Create a Person': '新增人員',
'Create a group entry in the registry.': '在登錄表中建立群組.',
'Create, enter, and manage surveys.': '建立、輸入及管理調查。',
'Creation of Surveys': '建立調查',
'Credential Details': '認證詳細資料',
'Credential added': '新增認證',
'Credential deleted': '刪除認證',
'Credential updated': '更新認證',
'Credentialling Organization': '認證組織',
'Credentials': '認證',
'Credit Card': '信用卡',
'Crime': '犯罪',
'Criteria': '準則',
'Currency': '貨幣',
'Current Entries': '現行項目',
'Current Group Members': '現有組員',
'Current Identities': '現行身分',
'Current Location': '目前地點',
'Current Log Entries': '現行日誌項目',
'Current Memberships': '現行的成員資格',
'Current Notes': '現行附註',
'Current Records': '現行記錄',
'Current Registrations': '目前登錄',
'Current Status': '現行狀態',
'Current Team Members': '現行團隊成員',
'Current Twitter account': '現行Twitter帳戶',
'Current community priorities': '現行社群優先順序',
'Current general needs': '目前的一般需求',
'Current greatest needs of vulnerable groups': '弱勢群體目前最大的需求',
'Current health problems': '現行健康問題',
'Current main income sources': '目前主要收入來源',
'Current major expenses': '目前主要費用',
'Current number of patients': '病患的現行數目',
'Current problems, categories': '現行問題,種類',
'Current problems, details': '現行問題,詳細資料',
'Current request': '現行要求',
'Current response': '現行回應',
'Current session': '現行階段作業',
'Current type of health problems, adults': '現行成人健康問題類型',
'Current type of health problems, children': '現行兒童健康問題類型',
'Current type of source for drinking water': '現行飲用水來源類型',
'Current type of source for sanitary water': '現行衛生用水來源類型',
'Currently no Certifications registered': '目前沒有認證登錄',
'Currently no Competencies registered': '目前沒有登錄能力',
'Currently no Course Certicates registered': '目前沒有登錄課程證書',
'Currently no Credentials registered': '目前沒有認證登錄',
'Currently no Missions registered': '目前沒有任務註冊',
'Currently no Skill Equivalences registered': '目前沒有登錄技能等值',
'Currently no Trainings registered': '目前沒有登錄訓練',
'Currently no entries in the catalog': '在型錄中目前沒有項目',
'Currently your system has default username and password. Username and Password are required by foriegn machines to sync data with your computer. You may set a username and password so that only those machines can fetch and submit data to your machines which your grant access by sharing your password.': '目前您的系統使用預設的使用者名稱及密碼。外部機器與您的電腦同步資料時需要使用者名稱及密碼。您可以設定使用者名稱和密碼,如此只有您透過分享密碼授予存取權的機器,才能向您的機器提取及提交資料。',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': '自訂資料庫資源(例如,在Sahana中定義為資源的任何項目)',
'Customisable category of aid': '可自訂的援助種類',
'DECISION': '決策',
'DNA Profile': 'DNA設定檔',
'DNA Profiling': 'DNA鑑定',
'DVI Navigator': 'DVI導覽器',
'Daily': '每日',
'Dam Overflow': '水壩溢流',
'Damage': '損壞',
'Dangerous Person': '危險的人員',
'Dashboard': '儀表板',
'Data import policy': '資料匯入原則',
'Data uploaded': '資料已上傳',
'Data': '資料',
'Database': '資料庫',
'Date & Time': '日期和時間',
'Date Avaialble': '可用日期',
'Date Available': '可出貨日期',
'Date Received': '收到的日期',
'Date Requested': '要求日期',
'Date Required': '需要的日期',
'Date Sent': '傳送日期',
'Date Until': '日期之前',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': '貨品收據的日期和時間。預設顯示目前時間,但可透過下拉清單編輯修改。',
'Date and Time': '日期與時間',
'Date and time this report relates to.': '此報告所關聯的日期與時間。',
'Date of Birth': '出生日期',
'Date of Latest Information on Beneficiaries Reached': '已觸及受益人之最新資訊日期',
'Date of Report': '報告的日期',
'Date': '日期',
'Date/Time of Find': '尋找的日期/時間',
'Date/Time of disappearance': '失蹤日期/時間',
'Date/Time when found': '找到日期/時間',
'Date/Time when last seen': '日期/時間前次看到',
'Date/Time': '日期/時間',
'De-duplicator': '去重複工具',
'Dead Body Details': '屍體詳細資料',
'Dead Body Reports': '屍體報告',
'Dead Body': '屍體',
'Dead body report added': '已新增屍體報告',
'Dead body report deleted': '已刪除屍體報告',
'Dead body report updated': '已更新屍體報告',
'Deaths in the past 24h': '過去24小時的死亡人數',
'Debug': '除錯',
'Decimal Degrees': '十進位度',
'Decision': '決策',
'Decomposed': '分解',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': '地圖視窗的預設高度。在視窗佈局中,地圖會放大填滿視窗,因此不需要在此設定較大的值。',
'Default Height of the map window.': '地圖視窗的預設高度。',
'Default Map': '預設地圖',
'Default Marker': '預設標記',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': '地圖視窗的預設寬度。在視窗佈局中,地圖會放大填滿視窗,因此不需要在此設定較大的值。',
'Default Width of the map window.': '地圖視窗的預設寬度。',
'Default synchronization policy': '預設同步化原則',
'Defaults updated': '預設更新',
'Defaults': '預設值',
'Defecation area for animals': '動物排便區',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': '定義情境以配置適當的資源(人力、資產和設施)。',
'Defines the icon used for display of features on handheld GPS.': '定義在掌上型GPS上顯示特徵所用的圖示。',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': '定義在互動式地圖和KML匯出中顯示特徵所用的圖示。若需覆寫指派給特徵類別的標記,可為個別位置指派標記。若兩者皆未定義,則使用預設標記。',
'Defines the icon used for display of features on interactive map & KML exports.': '定義在互動式地圖和KML匯出中顯示特徵所用的圖示。',
'Defines the marker used for display & the attributes visible in the popup.': '定義用於顯示的標記,以及蹦現式視窗中可見的屬性。',
'Degrees must be a number between -180 and 180': '度必須是介於-180和180之間的數字',
'Degrees must be between -180 and 180': '度必須介於-180和180之間',
'Degrees should be greater than 0 and less than 180': '度應該大於0且小於180',
'Delete Aid Request': '刪除輔助請求',
'Delete Alternative Item': '刪除替代項目',
'Delete Assessment Summary': '刪除評量摘要',
'Delete Assessment': '刪除評量',
'Delete Asset Assignments': '刪除資產分派',
'Delete Asset Log Entry': '刪除資產日誌項目',
'Delete Asset': '刪除資產',
'Delete Baseline Type': '刪除基準線類型',
'Delete Baseline': '刪除基準線',
'Delete Brand': '刪除品牌',
'Delete Budget': '刪除預算',
'Delete Bundle': '刪除軟體組',
'Delete Catalog Item': '刪除型錄項目',
'Delete Catalog': '刪除型錄',
'Delete Certificate': '刪除憑證',
'Delete Certification': '刪除認證',
'Delete Cluster Subsector': '刪除叢集次部門',
'Delete Cluster': '刪除叢集',
'Delete Commitment Item': '刪除承諾項目',
'Delete Commitment': '刪除承諾',
'Delete Competency Rating': '刪除能力分級',
'Delete Competency': '刪除能力',
'Delete Config': '刪除配置',
'Delete Contact Information': '刪除聯絡人資訊',
'Delete Course Certicate': '刪除課程證書',
'Delete Course': '刪除課程',
'Delete Credential': '刪除認證',
'Delete Distribution Item': '刪除分配項目',
'Delete Distribution': '刪除配送',
'Delete Document': '刪除文件',
'Delete Donor': '刪除捐贈者',
'Delete Entry': '刪除項目',
'Delete Event': '刪除事件',
'Delete Feature Layer': '刪除功能層',
'Delete Group': '刪除群組',
'Delete Hospital': '刪除醫院',
'Delete Image': '刪除影像',
'Delete Impact Type': '刪除影响類型',
'Delete Impact': '刪除影響',
'Delete Incident Report': '刪除事故報告',
'Delete Incident': '刪除事件',
'Delete Inventory Item': '刪除庫存項目',
'Delete Inventory Store': '刪除庫存儲存庫',
'Delete Item Category': '刪除項目種類',
'Delete Item Pack': '刪除項目套件',
'Delete Item': '刪除項目',
'Delete Job Role': '刪除工作角色',
'Delete Key': '刪除金鑰',
'Delete Kit': '刪除套件',
'Delete Landmark': '刪除地標',
'Delete Layer': '刪除層',
'Delete Level 1 Assessment': '刪除層次一評量',
'Delete Level 2 Assessment': '刪除層次二評量',
'Delete Location': '刪除位置',
'Delete Map Profile': '刪除地圖設定檔',
'Delete Marker': '刪除標記',
'Delete Membership': '刪除組員',
'Delete Message': '刪除訊息',
'Delete Metadata': '刪除 Meta 資料',
'Delete Mission': '刪除任務',
'Delete Need Type': '刪除需求類型',
'Delete Need': '刪除需求',
'Delete Office': '刪除辦公室',
'Delete Old': '刪除舊記錄',
'Delete Organization': '刪除組織',
'Delete Peer': '刪除對等點',
'Delete Person': '刪除人員',
'Delete Photo': '刪除照片',
'Delete Population Statistic': '刪除人口统計資料',
'Delete Position': '刪除位置',
'Delete Project': '刪除專案',
'Delete Projection': '刪除投射',
'Delete Rapid Assessment': '刪除快速評量',
'Delete Received Item': '刪除接收項目',
'Delete Received Shipment': '刪除接收出貨',
'Delete Record': '刪除記錄',
'Delete Recovery Report': '刪除尋獲報告',
'Delete Report': '刪除報告',
'Delete Request Item': '刪除要求項目',
'Delete Request': '刪除要求',
'Delete Resource': '刪除資源',
'Delete Room': '刪除房間',
'Delete Scenario': '刪除實務範例',
'Delete Section': '刪除區段',
'Delete Sector': '刪除行業',
'Delete Sent Item': '刪除已傳送項目',
'Delete Sent Shipment': '刪除已傳送出貨',
'Delete Service Profile': '刪除服務設定檔',
'Delete Setting': '刪除設定',
'Delete Skill Equivalence': '刪除技術等值',
'Delete Skill Provision': '刪除技術供應',
'Delete Skill Type': '刪除技術類型',
'Delete Skill': '刪除技術',
'Delete Staff Type': '刪除人員類型',
'Delete Status': '刪除狀態',
'Delete Subscription': '刪除訂閱',
'Delete Subsector': '刪除次部門',
'Delete Survey Answer': '刪除調查回答',
'Delete Survey Question': '刪除調查問題',
'Delete Survey Section': '刪除調查區段',
'Delete Survey Series': '刪除調查系列',
'Delete Survey Template': '刪除調查范本',
'Delete Training': '刪除訓練',
'Delete Unit': '刪除單元',
'Delete User': '刪除使用者',
'Delete Volunteer': '刪除志願者',
'Delete Warehouse': '刪除倉庫',
'Delete from Server?': '從伺服器刪除嗎?',
'Delete': '刪除',
'Delivered': '已遞送',
'Delphi Decision Maker': '德爾菲決策工具',
'Demographic': '人口統計學',
'Demonstrations': '示範',
'Dental Examination': '牙齒檢查',
'Dental Profile': '牙齒設定檔',
'Department/Unit Name': '部門/單元名稱',
'Deployment': '部署',
'Describe the condition of the roads to your hospital.': '描述通往您醫院的道路狀況。',
"Describe the procedure which this record relates to (e.g. 'medical examination')": '描述此記錄所關聯的程序(例如「醫學檢查」)',
'Description of Bin Type': '儲物箱類型說明',
'Description of Contacts': '聯絡人說明',
'Description of defecation area': '排便區說明',
'Description of drinking water source': '飲用水來源說明',
'Description of sanitary water source': '衛生用水來源說明',
'Description of water source before the disaster': '災難前的水源說明',
'Description': '說明',
'Descriptive Text (e.g., Prose, etc)': '說明文字(例如散文等)',
'Designated for': '指定給',
'Desire to remain with family': '希望與家人同處',
'Destination': '目的地',
'Destroyed': '已毀損',
'Details field is required!': '詳細資料欄位是必要的!',
'Details': '詳細資料',
'Diaphragms, horizontal bracing': '橫隔板、水平支撐',
'Diarrhea among children under 5': '5歲以下兒童腹瀉',
'Dignitary Visit': '要人訪問',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '儲物箱的尺寸。請以「1 x 2 x 3」格式輸入寬x深x高,然後從下拉清單選擇單位。',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '儲存位置的尺寸。請以「1 x 2 x 3」格式輸入寬x深x高,然後從下拉清單選擇單位。',
'Direction': '方向',
'Disabilities': '殘障人士',
'Disable': '停用',
'Disabled participating in coping activities': '殘疾人士參與應對活動',
'Disabled': '已停用',
'Disabled?': '殘疾人士?',
'Disaster Victim Identification': '災民身份識別',
'Disaster Victim Registry': '災民登錄',
'Disaster clean-up/repairs': '災後清理/修復',
'Discharge (cusecs)': '流量(立方英尺/秒)',
'Discharges/24hrs': '出院人數/24小時',
'Discussion Forum on item': '項目討論區',
'Discussion Forum': '討論區',
'Disease vectors': '病媒',
'Diseases': '疾病',
'Dispatch Items': '分派項目',
'Dispatch': '分派',
'Displaced Populations': '流離失所人口',
'Displaced': '流離失所',
'Display Polygons?': '顯示多邊形?',
'Display Routes?': '顯示路線?',
'Display Tracks?': '顯示追蹤?',
'Display Waypoints?': '顯示航點?',
'Dispose Expired/Unusable Items': '處置過期/無法使用的項目',
'Dispose': '處置',
'Distance between defecation area and water source': '排便區與水源之間的距離',
'Distance between latrines and temporary shelter in meters': '廁所與臨時避難所之間的距離(公尺)',
'Distance between shelter and latrines': '避難所與廁所之間的距離',
'Distance from %s:': '距離%s:',
'Distance(Kms)': '距離(Kms)',
'Distribution Details': '配送明細',
'Distribution Item Details': '分配項目詳細資料',
'Distribution Item added': '分配項目新增',
'Distribution Item deleted': '分配項目刪除',
'Distribution Item updated': '配送更新項目',
'Distribution Item': '分配項目',
'Distribution Items': '分配項目',
'Distribution added': '配送新增',
'Distribution deleted': '刪除分配',
'Distribution groups': '收件群組',
'Distribution updated': '配送更新',
'Distribution': '配送',
'Distributions': '分配',
'District': '地區',
'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '您社群中的青少年是否參與協助他們應對災難的活動?(例如會議、宗教活動、社區清理志工等)',
'Do households each have at least 2 containers (10-20 litres each) to hold water?': '每個家庭至少有兩個儲水器(每個10-20公升)儲水嗎?',
'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': '每個家庭是否有適當的烹調設備和材料來煮食(爐,壺,盤,碟,杯等)?',
'Do households have bedding materials available (tarps, plastic mats, blankets)?': '每個家庭是否有被鋪(防水布,塑料墊子,毯子)?',
'Do households have household water storage containers?': '每個家庭是否有儲水器?',
'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '您社群中的少數族群成員是否參與協助他們應對災難的活動?(例如會議、宗教活動、社區清理志工等)',
'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '您社群中的年長者是否參與協助他們應對災難的活動?(例如會議、宗教活動、社區清理志工等)',
'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': '人們是否有至少2套完整的服裝(襯衫,褲子/紗籠,內衣)?',
'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': '人們是否能可靠地獲得足夠的衛生/衛生用品(沐浴香皂,洗衣皂,洗髮水,牙膏和牙刷)?',
'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '您社群中的身心障礙人士是否參與協助他們應對災難的活動?(例如會議、宗教活動、社區清理志工等)',
'Do women and girls have easy access to sanitary materials?': '婦女和女孩是否容易取得衛生用品?',
'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '您社群中的婦女是否參與協助她們應對災難的活動?(例如會議、宗教活動、社區清理志工等)',
'Do you have access to cash to restart your business?': '您是否有現金可用於重新開始您的事業?',
'Do you know of any incidents of violence?': '您是否知道任何暴力事件?',
'Do you know of children living on their own (without adults)?': '您是否知道有獨自生活(無成人陪伴)的兒童?',
'Do you know of children separated from their parents or caregivers?': '您是否知道有與父母或照顧者分離的兒童?',
'Do you know of children that have been orphaned by the disaster?': '您是否知道有因災難成為孤兒的兒童?',
'Do you know of children that have been sent to safe places?': '您是否知道有已被送往安全地點的兒童?',
'Do you know of children that have disappeared without explanation in the period since the disaster?': '您是否知道有在災後無故失蹤的兒童?',
'Do you know of older people who are primary caregivers of children?': '您是否知道有作為兒童主要照顧者的年長者?',
'Do you know of parents/caregivers missing children?': '您是否知道有兒童失蹤的父母/照顧者?',
'Do you really want to delete these records?': '您確定要刪除這些記錄嗎?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': '您要取消這筆已接收的出貨嗎?這些項目將從庫存中移除。這個動作無法復原!',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': '您要取消這筆已傳送的出貨嗎?這些項目將退回庫存。這個動作無法復原!',
'Do you want to over-write the file metadata with new default values?': '您要以新的預設值覆寫檔案的meta資料嗎?',
'Do you want to receive this shipment?': '您要接收此出貨嗎?',
'Do you want to send these Committed items?': '您要傳送這些已確定的項目嗎?',
'Do you want to send this shipment?': '您要傳送此出貨嗎?',
'Document Details': '文件詳細資料',
'Document Library': '文件庫',
'Document Scan': '文件掃描',
'Document added': '新增文件',
'Document deleted': '文件已刪除',
'Document updated': '已更新的文件',
'Document': '文件',
'Documents and Photos': '文件和照片',
'Documents': '文件',
'Does this facility provide a cholera treatment center?': '該設施是否提供霍亂治療中心?',
'Doing nothing (no structured activity)': '什麼都不做(沒有結構化的活動)',
'Dollars': '美元',
'Domain': '網域',
'Domestic chores': '家務',
'Donated': '已捐贈',
'Donation Certificate': '捐贈證書',
'Donation Phone #': '捐贈電話號碼',
'Donor Details': '捐贈者詳細資料',
'Donor added': '已新增捐贈者',
'Donor deleted': '已刪除捐贈者',
'Donor updated': '已更新捐贈者',
'Donors Report': '捐贈者報告',
'Door frame': '門框架',
'Download PDF': '下載 PDF',
'Draft Features': '草稿功能',
'Draft': '草稿',
'Drainage': '排水',
'Drawing up a Budget for Staff & Equipment across various Locations.': '為各地點的人員和設備編列預算。',
'Drill Down by Group': '依群組往下探查',
'Drill Down by Incident': '依事件往下探查',
'Drill Down by Shelter': '依避難所往下探查',
'Driving License': '駕照',
'Drugs': '藥物',
'Dug Well': '挖掘井',
'Duplicate?': '重複?',
'Duration': '持續時間',
'Dust Storm': '沙塵暴',
'Dwelling': '住宅',
'EMS Reason': 'EMS原因',
'EMS Status Reason': 'EMS狀態原因',
'EMS Status': 'EMS狀態',
'EMS Traffic Status': 'EMS交通狀態',
'ER Status Reason': 'ER狀態原因',
'ER Status': 'ER狀態',
'Early Recovery': '早期復原',
'Earthquake': '地震',
'Easy access to sanitation items for women/girls': '婦女/女孩容易取得衛生用品',
'Edit Activity': '編輯活動',
'Edit Address': '編輯地址',
'Edit Aid Request': '編輯輔助請求',
'Edit Alternative Item': '編輯替代項目',
'Edit Application': '編輯應用程式',
'Edit Assessment Summary': '編輯評量摘要',
'Edit Assessment': '編輯評量',
'Edit Asset Assignment': '編輯資產分派',
'Edit Asset Log Entry': '編輯資產日誌項目',
'Edit Asset': '編輯資產',
'Edit Baseline Type': '編輯基準線類型',
'Edit Baseline': '編輯基準線',
'Edit Brand': '編輯品牌',
'Edit Budget': '編輯預算',
'Edit Bundle': '編輯軟體組',
'Edit Camp Service': '編輯營地服務',
'Edit Camp Type': '編輯營地類型',
'Edit Camp': '編輯營地',
'Edit Catalog Item': '編輯型錄項目',
'Edit Catalog': '編輯型錄',
'Edit Category<>Sub-Category<>Catalog Relation': '編輯Category<>Sub-Category<>Catalog關系',
'Edit Certificate': '編輯憑證',
'Edit Certification': '編輯認證',
'Edit Cluster Subsector': '編輯叢集次部門',
'Edit Cluster': '編輯叢集',
'Edit Commitment Item': '編輯承諾項目',
'Edit Commitment': '編輯承諾',
'Edit Competency Rating': '編輯能力分級',
'Edit Competency': '編輯能力',
'Edit Config': '編輯配置',
'Edit Contact Information': '編輯聯絡資訊',
'Edit Contact': '編輯聯絡人',
'Edit Contents': '編輯內容',
'Edit Course Certicate': '編輯課程證書',
'Edit Course': '編輯課程',
'Edit Credential': '編輯認證',
'Edit Dead Body Details': '編輯屍體詳細資料',
'Edit Defaults': '編輯預設值',
'Edit Description': '編輯說明',
'Edit Details': '編輯詳細資料',
'Edit Disaster Victims': '編輯災難受害者',
'Edit Distribution Item': '編輯分配項目',
'Edit Distribution': '編輯配送',
'Edit Document': '編輯文件',
'Edit Donor': '編輯捐贈者',
'Edit Email Settings': '編輯電子郵件設定',
'Edit Entry': '編輯條目',
'Edit Event': '編輯事件',
'Edit Facility': '編輯設施',
'Edit Feature Layer': '編輯功能層',
'Edit Flood Report': '編輯水災報告',
'Edit Gateway Settings': '編輯閘道設定',
'Edit Group': '編輯群組',
'Edit Hospital': '編輯醫院',
'Edit Human Resource': '編輯人力資源',
'Edit Identification Report': '編輯識別報告',
'Edit Identity': '編輯身分',
'Edit Image Details': '編輯映像檔詳細資料',
'Edit Image': '編輯影像',
'Edit Impact Type': '編輯影响類型',
'Edit Impact': '編輯影响',
'Edit Incident Report': '編輯事件報告',
'Edit Incident': '編輯事件',
'Edit Inventory Item': '編輯庫存項目',
'Edit Inventory Location': '編輯庫存位置',
'Edit Inventory Store': '編輯庫存儲存庫',
'Edit Item Catalog Categories': '編輯型錄種類項目',
'Edit Item Catalog': '編輯項目型錄',
'Edit Item Category': '編輯項目種類',
'Edit Item Pack': '編輯項目套件',
'Edit Item Sub-Categories': '編輯項目子種類',
'Edit Item': '編輯項目',
'Edit Job Role': '編輯工作角色',
'Edit Key': '編輯索引鍵',
'Edit Kit': '編輯套件',
'Edit Landmark': '編輯地標',
'Edit Layer': '編輯層',
'Edit Level %d Locations?': '編輯層次%d位置?',
'Edit Level 1 Assessment': '編輯層次一評量',
'Edit Level 2 Assessment': '編輯層次二評量',
'Edit Location': '編輯位置',
'Edit Log Entry': '編輯日誌項目',
'Edit Map Profile': '編輯對映配置',
'Edit Map Services': '編輯對映服務',
'Edit Marker': '編輯標記',
'Edit Membership': '編輯成員資格',
'Edit Message': '編輯訊息',
'Edit Messaging Settings': '編輯传訊設定',
'Edit Metadata': '編輯 meta 資料',
'Edit Mission': '編輯任務',
'Edit Modem Settings': '編輯數据機設定',
'Edit Need Type': '編輯需求類型',
'Edit Need': '編輯需求',
'Edit Note': '編輯附註',
'Edit Office': '編輯辦公室',
'Edit Options': '編輯選項',
'Edit Organization': '編輯組織',
'Edit Parameters': '編輯參數',
'Edit Partner': '編輯伙伴',
'Edit Peer Details': '編輯對等點詳細資料',
'Edit Peer': '編輯對等點',
'Edit Person Details': '編輯人員詳細資料',
'Edit Personal Effects Details': '編輯個人物品詳細資料',
'Edit Photo': '編輯照片',
'Edit Pledge': '編輯認捐',
'Edit Population Statistic': '編輯人口统計資料',
'Edit Position': '編輯位置',
'Edit Problem': '編輯問題',
'Edit Project': '編輯專案',
'Edit Projection': '編輯投射',
'Edit Rapid Assessment': '編輯快速評量',
'Edit Received Item': '編輯接收項目',
'Edit Received Shipment': '編輯收到出貨',
'Edit Record': '編輯記錄',
'Edit Recovery Details': '編輯尋獲詳細資料',
'Edit Registration Details': '編輯登錄詳細資料',
'Edit Registration': '編輯登錄',
'Edit Report': '編輯報告',
'Edit Request Item': '編輯要求項目',
'Edit Request': '編輯要求',
'Edit Resource': '編輯資源',
'Edit Response': '編輯回應',
'Edit River': '編輯河流',
'Edit Role': '編輯角色',
'Edit Room': '編輯室',
'Edit Scenario': '編輯範例情節',
'Edit School District': '編輯學區',
'Edit School Report': '編輯學校報告',
'Edit Section': '編輯區段',
'Edit Sector': '編輯行業',
'Edit Sent Item': '編輯已傳送項目',
'Edit Setting': '編輯設定',
'Edit Settings': '編輯設定',
'Edit Shelter Service': '編輯避難所服務',
'Edit Shelter Type': '編輯避難所類型',
'Edit Shelter': '編輯避難所',
'Edit Shipment Transit Log': '編輯出貨運送日誌',
'Edit Shipment to Send': '編輯待傳送出貨',
'Edit Shipment/Way Bills': '編輯出貨/運貨單',
'Edit Shipment<>Item Relation': '編輯Shipment<>Item關係',
'Edit Site': '編輯網站',
'Edit Skill Equivalence': '編輯等值技能',
'Edit Skill Provision': '編輯技術供應',
'Edit Skill Type': '編輯技術類型',
'Edit Skill': '編輯技術',
'Edit Solution': '編輯解決方案',
'Edit Source': '編輯來源',
'Edit Staff Type': '編輯人員類型',
'Edit Staff': '編輯人員',
'Edit Storage Bin Type(s)': '編輯儲物箱類型',
'Edit Storage Bins': '編輯儲物箱',
'Edit Storage Location': '編輯儲存位置',
'Edit Subscription': '編輯訂閱',
'Edit Subsector': '編輯次部門',
'Edit Survey Answer': '編輯調查回答',
'Edit Survey Question': '編輯調查問題',
'Edit Survey Section': '編輯調查區段',
'Edit Survey Series': '編輯調查系列',
'Edit Survey Template': '編輯調查范本',
'Edit Sync Settings': '編輯同步設定',
'Edit Task': '編輯作業',
'Edit Team': '編輯團隊',
'Edit Theme': '編輯佈景主題',
'Edit Themes': '編輯佈景主題',
'Edit Ticket': '編輯票據',
'Edit Track': '編輯追蹤',
'Edit Training': '編輯培訓',
'Edit Tropo Settings': '編輯Tropo設定',
'Edit Unit': '編輯單元',
'Edit Update': '編輯更新',
'Edit User': '編輯使用者',
'Edit Volunteer Availability': '編輯志工可用性',
'Edit Volunteer Details': '編輯志工詳細資料',
'Edit Volunteer Registration': '編輯志工登錄',
'Edit Warehouse': '編輯倉儲',
'Edit current record': '編輯現行記錄',
'Edit message': '編輯訊息',
'Edit the Application': '編輯應用程式',
'Edit': '編輯',
'Editable?': '可編輯?',
'Education materials received': '已收到教育材料',
'Education materials, source': '教育材料,來源',
'Education': '教育',
'Effects Inventory': '個人物品庫存',
'Either a shelter or a location must be specified': '必須指定避難所或位置',
'Either file upload or document URL required.': '必須提供檔案上傳或文件URL。',
'Either file upload or image URL required.': '必須提供檔案上傳或影像URL。',
'Elderly person headed households (>60 yrs)': '年長者為戶主的家庭(60歲以上)',
'Electrical': '電氣',
'Electrical, gas, sewerage, water, hazmats': '電力、瓦斯、污水、自來水、危險物質',
'Electricity': '電力',
'Elevated': '高架',
'Elevators': '升降機',
'Email Address': '電子郵件位址',
'Email Settings': '電子郵件設定',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': '電子郵件位址已驗證,但註冊仍在等待核准-請等候確認通知。',
'Email settings updated': '更新電子郵件設定',
'Email': '電子郵件',
'Embassy': '大使館',
'Emergency Capacity Building project': '緊急能力建構專案',
'Emergency Department': '緊急部門',
'Emergency Shelter': '緊急避難所',
'Emergency Support Facility': '緊急支援設施',
'Emergency Support Service': '緊急支援服務',
'Emergency Telecommunications': '緊急電信',
'Enable/Disable Layers': '啟用/停用層',
'Enabled': '已啟用',
'Enabled?': '已啟用?',
'End Date': '結束日期',
'End date should be after start date': '結束日期應該晚于開始日期',
'End date': '結束日期',
'End of Period': '結束的期間',
'English': '英文',
'Enter Coordinates in Deg Min Sec': '以度分秒的格式輸入座標值',
'Enter Coordinates:': '輸入座標:',
'Enter a GPS Coord': '輸入GPS座標',
'Enter a date before': '輸入的日期須早於',
'Enter a name for the spreadsheet you are uploading (mandatory).': '輸入一個您上傳的電子表格的名稱(強制)。',
'Enter a new support request.': '輸入一個新的援助申請。',
'Enter a summary of the request here.': '在這裡輸入申請摘要。',
'Enter a unique label!': '輸入獨一無二的標籤!',
'Enter a valid date before': '輸入的有效日期須早於',
'Enter a valid email': '輸入一個有效的電子郵件地址',
'Enter a valid future date': '輸入一个有效的未來日期',
'Enter some characters to bring up a list of possible matches': '輸入部分字元以顯示可能相符項目的清單',
'Enter some characters to bring up a list of possible matches.': '輸入部分字元以顯示可能相符項目的清單。',
'Enter tags separated by commas.': '輸入以逗點區隔的標籤。',
'Enter the same password as above': '輸入与上面相同的密碼',
'Enter your firstname': '輸入你的名字',
'Entered': '已輸入',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': '輸入電話號碼是選用的,但這樣做可讓您訂閱接收SMS訊息。',
'Entry deleted': '已刪除項目',
'Environment': '環境',
'Equipment': '設備',
'Error encountered while applying the theme.': '套用佈景主題時發生錯誤。',
'Error in message': '訊息中有錯誤',
"Error logs for '%(app)s'": '"%(app)s" 的錯誤日誌',
'Errors': '錯誤',
'Est. Delivery Date': '預計交付日期',
'Estimated # of households who are affected by the emergency': '受緊急事件影響的估計家庭數',
'Estimated # of people who are affected by the emergency': '受緊急事件影響的估計人數',
'Estimated Overall Building Damage': '估計整體建築物損壞',
'Estimated total number of people in institutions': '機構中的估計總人數',
'Euros': '歐元',
'Evacuation': '撤離',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': '評估此訊息中的資訊。(此值不應用於公開警告應用程式。)',
'Event Details': '事件詳細資料',
'Event Time': '事件時間',
'Event Type': '事件類型',
'Event added': '新增事件',
'Event deleted': '刪除事件',
'Event type': '事件類型',
'Event updated': '更新事件',
'Event': '事件',
'Events': '事件',
'Example': '範例',
'Exceeded': '已超出',
'Excellent': '絕佳',
'Exclude contents': '排除內容',
'Excreta disposal': '排泄物處置',
'Execute a pre-planned activity identified in <instruction>': '執行<instruction>中所識別的預先計畫活動',
'Exercise': '練習',
'Exercise?': '練習?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': '練習表示所有畫面都有浮水印,且所有通知都有字首。',
'Existing Placard Type': '現有標示類型',
'Existing food stocks': '現有食物存量',
'Existing food stocks, main dishes': '現有食物存量,主菜',
'Existing food stocks, side dishes': '現有食物存量,配菜',
'Existing location cannot be converted into a group.': '現有的位置無法轉換成群組。',
'Exits': '出口',
'Expected In': '預期中',
'Expected Out': '預期輸出',
'Experience': '經驗',
'Expiry Date': '到期日期',
'Expiry Time': '期限時間',
'Explosive Hazard': '爆炸性危害',
'Export Data': '匯出資料',
'Export Database as CSV': '資料庫匯出成CSV',
'Export in GPX format': '匯出為GPX格式',
'Export in KML format': '匯出為KML格式',
'Export in OSM format': '匯出為OSM格式',
'Export in PDF format': '匯出為PDF檔',
'Export in RSS format': '匯出為RSS格式',
'Export in XLS format': '匯出為XLS檔',
'Export': '匯出',
'Exterior Only': '僅外景',
'Exterior and Interior': '外部和內部',
'External Features': '外部特性',
'Eye Color': '眼睛顏色',
'Facebook': '臉書',
'Facial hair, color': '面部毛髮,顏色',
'Facial hair, type': '面部毛髮,類型',
'Facial hear, length': '面部毛髮,長度',
'Facilities': '設施',
'Facility Details': '設施詳細資料',
'Facility Operations': '設施營運',
'Facility Status': '設施狀態',
'Facility Type': '設施類型',
'Facility added': '已新增設施',
'Facility or Location': '設施或位置',
'Facility removed': '已移除設施',
'Facility updated': '已更新設施',
'Facility': '設施',
'Factors affecting school attendance': '影響學校出席率的因素',
'Fail': '失敗',
'Failed!': '失敗!',
'Fair': '普通',
'Falling Object Hazard': '落在物件危害',
'Families/HH': '家庭/戶',
'Family Care': '家庭照護',
'Family tarpaulins received': '已收到家庭防水布',
'Family tarpaulins, source': '家庭防水布,來源',
'Family': '家庭',
'Family/friends': '家人/朋友',
'Farmland/fishing material assistance, Rank': '農地/漁業物資援助,等級',
'Fax': '傳真',
'Feature Layer Details': '功能層詳細資料',
'Feature Layer added': '功能層新增',
'Feature Layer deleted': '功能刪除層',
'Feature Layer updated': '功能更新層',
'Feature Layers': '功能層',
'Feature Namespace': '特性名稱空間',
'Feature Request': '功能要求',
'Feature Type': '功能類型',
'Feature': '特性 (feature)',
'Features Include': '功能包括',
'Female headed households': '女性頭家庭',
'Female': '女性',
'Few': '幾',
'Field Hospital': '野戰醫院',
'Field': '欄位',
'Fields tagged with a star': '標有星號的欄位',
'File': '檔案',
'Fill in Latitude': '填寫緯度',
'Fill in Longitude': '填寫經度',
'Filter Field': '過濾欄位',
'Filter Value': '過濾器值',
'Filter': '過濾器',
'Filtered search of aid pledges and requests': '援助認捐與要求的過濾搜尋',
'Find Dead Body Report': '尋找屍體報告',
'Find Hospital': '尋找醫院',
'Find Person Record': '尋找人員記錄',
'Find Recovery Report': '尋找尋獲報告',
'Find Volunteers': '尋找志願者',
'Find a Person Record': '尋找人員記錄',
'Find by Name': '依名稱搜尋',
'Find': '尋找',
'Finder': '搜尋器',
'Fingerprint': '指紋',
'Fingerprinting': '產生指紋',
'Fingerprints': '指紋',
'Finish': '完成',
'Finished Jobs': '完成工作',
'Fire suppression and rescue': '滅火和救援',
'Fire': '火災',
'First Name': '名',
'First name': '名',
'Fishing': '捕魚',
'Flash Flood': '暴洪',
'Flash Freeze': '急凍',
'Fleet Management': '車隊管理',
'Flexible Impact Assessments': '彈性影響評量',
'Flood Alerts show water levels in various parts of the country': '水災警示顯示全國各地的水位',
'Flood Alerts': '水災警示',
'Flood Report Details': '水災報告詳細資料',
'Flood Report added': '水災新增報告',
'Flood Report deleted': '水災報告刪除',
'Flood Report updated': '水災報告更新',
'Flood Report': '水災報告',
'Flood Reports': '水災報告',
'Flood': '水災',
'Flow Status': '流程狀態',
'Focal Point': '聯絡窗口',
'Food Supply': '食品供應',
'Food assistance available/expected': '預期可提供食品援助',
'Food assistance': '食品援助',
'Food': '食物',
'Footer file %s missing!': '缺少頁腳文件 %s!',
'Footer': '頁腳',
'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': '對於Eden實例,請輸入應用程式的基本URL,例如 http://sync.sahanfoundation.org/eden;對於其他對等點,請輸入同步介面的URL。',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': '對於POP-3,此值通常是110(SSL為995);對於IMAP,通常是143(IMAP為993)。',
'For Warehouse': '倉儲',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': '對於國家,這會是ISO2代碼;對於城鎮,則會是機場Locode。',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': '每個同步夥伴都有一個預設同步工作,會在指定的時間間隔後執行。您也可以依需求設定更多自訂的同步工作。按一下右邊的鏈結即可開始。',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': '為加強安全,建議您輸入使用者名稱和密碼,並通知組織中其他機器的管理者,在「同步->同步夥伴」中針對您的UUID加入這組使用者名稱和密碼',
'For live help from the Sahana community on using this application, go to': '想要從 Sahana 社群取得使用方面的線上幫助,請前往',
'For messages that support alert network internal functions': '用於支援警示網路內部功能的訊息',
'For more details on the Sahana Eden system, see the': '更多關於 Sahana Eden 系統的資訊,請見',
'For more information, see': '想要了解更多資訊,請見',
'For': '適用於',
'Forest Fire': '森林火災',
'Formal camp': '正式營地',
'Format': '格式',
'Forms': '表單',
'Found': '找到',
'Freezing Drizzle': '凍結毛毛雨',
'Freezing Rain': '凍結雨',
'Freezing Spray': '凍結噴灑',
'French': '法文',
'Friday': '星期五',
'From Inventory': '從庫存',
'From Location': '起點位置',
'From Organization': '來源組織',
'From Person': '從人員',
'From Warehouse': '從倉儲',
'From': '開始',
'Frost': '霜',
'Fuel': '燃料',
'Fulfil. Status': '供貨狀態',
'Fulfillment Status': '供貨狀態',
'Full beard': '落腮鬍',
'Full': '滿載',
'Fullscreen Map': '全螢幕地圖',
'Functional Tests': '功能測試',
'Functions available': '可用的函數',
'Funding Organization': '資金組織',
'Funeral': '喪葬',
'Further Action Recommended': '建議進一步的動作',
'GIS Reports of Shelter': '住房的地理信息系統報告',
'GIS integration to view location details of the Shelter': '地理信息系統集成查看住房的詳細位置介紹',
'GPS Marker': 'GPS標記',
'GPS Track File': 'GPS追蹤檔案',
'GPS Track': 'GPS追蹤',
'GPX Track': 'GPX追蹤',
'GRN Status': 'GRN狀態',
'Gale Wind': '強風',
'Gap Analysis Map': '差距分析地圖',
'Gap Analysis Report': '差距分析報告',
'Gap Analysis': '差距分析',
'Gap Map': '差距地圖',
'Gap Report': '差距報告',
'Gateway Settings': '閘道設定',
'Gateway settings updated': '閘道設定更新',
'Gateway': '閘道',
'Gender': '性別',
'General Comment': '一般評論',
'General Medical/Surgical': '一般內科/外科',
'General emergency and public safety': '一般緊急事件和公共安全',
'General information on demographics': '人口統計的一般資訊',
'General': '一般',
'Generator': '發電機',
'Geocode': '地理編碼',
'Geocoder Selection': '選擇地理編碼程式',
'Geometry Name': '幾何形狀名稱',
'Geonames.org search requires Internet connectivity!': 'Geonames.org搜尋需要網際網路連線功能!',
'Geophysical (inc. landslide)': '地球物理(含山崩)',
'Geotechnical Hazards': '大地工程危害',
'Geraldo module not available within the running Python - this needs installing for PDF output!': '執行中的Python內沒有Geraldo模組-需要安裝它才能輸出PDF!',
'Get incoming recovery requests as RSS feed': '以RSS訊息饋送取得送入的尋獲要求',
'Girls 13-18 yrs in affected area': '在受影响地區的13-18歲女孩',
'Girls 13-18 yrs not attending school': '不上學的13-18歲女孩',
'Girls 6-12 yrs in affected area': '在受影響地區的6-12歲女童',
'Girls 6-12 yrs not attending school': '不上學的6-12歲女童',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': '提供圖像的簡要描述,例如圖片的什麼地方可以看到什麼(可選)。',
'Give information about where and when you have seen the person': '提供您在何時何地見過此人的資訊',
'Give information about where and when you have seen them': '提供您在何時何地見過他們的資訊',
'Global Messaging Settings': '全域傳訊設定',
'Go to Request': '跳至要求',
'Go': '執行',
'Good Condition': '狀況良好',
'Good': '良好',
'Goods Received Note': '收貨單',
'Government UID': '政府UID',
'Government building': '政府建築物',
'Government': '政府機關',
'Grade': '等級',
'Greek': '希臘文',
'Green': '綠色',
'Ground movement, fissures': '地面移動、裂縫',
'Ground movement, settlement, slips': '地面移動、沉陷、滑動',
'Group %(group_id)s created': '群組 %(group_id)s 建立',
'Group Description': '群組說明',
'Group Details': '群組詳細資料',
'Group ID': '群組編號',
'Group Member added': '群組成員已新增',
'Group Members': '群組成員',
'Group Memberships': '加入群組',
'Group Name': '群組名稱',
'Group Title': '群組標題',
'Group Type': '群組類別',
'Group added': '群組已新增',
'Group deleted': '群組已刪除',
'Group description': '群組說明',
'Group name': '群組名稱',
'Group type': '群組類別',
'Group updated': '群組已更新',
'Group': '群組',
'Groups removed': '群組已刪除',
'Groups': '群組',
'Guest': '訪客',
'HR Manager': 'HR管理員',
'Hail': '冰雹',
'Hair Color': '頭髮顏色',
'Hair Length': '頭髮長度',
'Hair Style': '髮型',
'Has additional rights to modify records relating to this Organization or Site.': '具有修改與此組織或站點相關記錄的額外權限。',
'Has data from this Reference Document been entered into Sahana?': '此參考文件中的資料是否已輸入Sahana?',
'Has only read-only access to records relating to this Organization or Site.': '僅具有與此組織或站點相關記錄的唯讀存取權。',
'Has the Certificate for receipt of the shipment been given to the sender?': '收貨憑證是否已交給寄件者?',
'Has the GRN (Goods Received Note) been completed?': 'GRN(收貨單)是否已完成?',
'Has the safety and security of women and children in your community changed since the emergency?': '緊急事件發生後,您社群中婦女和兒童的安全是否有改變?',
'Has your business been damaged in the course of the disaster?': '您的事業是否在災難過程中受損?',
'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': '家庭是否已收到任何避難所/非食品物資援助,或預期近日將獲得援助?',
'Have normal food sources been disrupted?': '正常的食物來源是否已中斷?',
'Have schools received or are expecting to receive any assistance?': '學校有接收或預期接收任何幫助嗎?',
'Have the people received or are you expecting any medical or food assistance in the coming days?': '人們是否已收到任何醫療或食品援助,或您是否預期近日將獲得?',
'Hazard Pay': '危險津貼',
'Hazard': '危害',
'Hazardous Material': '危害性物料',
'Hazardous Road Conditions': '危險道路條件',
'Header Background': '標頭背景',
'Header background file %s missing!': '缺少標頭背景檔案%s!',
'Headquarters': '總公司',
'Health care assistance, Rank': '醫療协助,等級',
'Health center with beds': '有床位的健康中心',
'Health center without beds': '無床位的健康中心',
'Health center': '健康中心',
'Health services functioning prior to disaster': '災難前運作的健康服務',
'Health services functioning since disaster': '災難後運作的健康服務',
'Health services status': '健康服務狀態',
'Health': '健康',
'Healthcare Worker': '醫療保健工作者',
'Heat Wave': '熱浪',
'Heat and Humidity': '高溫和濕度',
'Height (cm)': '高度(公分)',
'Height (m)': '高度(公尺)',
'Height': '高度',
'Help': '說明',
'Helps to monitor status of hospitals': '有助於監視醫院的狀態',
'Helps to report and search for Missing Persons': '協助通報及搜尋失蹤人員',
'Helps to report and search for missing persons': '協助通報及搜尋失蹤人員',
'Here are the solution items related to the problem.': '以下是與此問題相關的解決方案項目。',
'Here you will find all synchronization attempts made by either your machine or foreign machines for data exchange. This also lists data exchanges made using Sahana API.': '您可以在這裡找到您的機器或外部機器為交換資料所做的所有同步嘗試。這裡也會列出使用Sahana API進行的資料交換。',
'Heritage Listed': '列為遺產',
'Hierarchy Level 0 Name (i.e. Country)': '層次0名稱(例如國家)',
'Hierarchy Level 1 Name (e.g. State or Province)': '層次一名稱(例如,州或省)',
'Hierarchy Level 2 Name (e.g. District or County)': '層次二名稱(例如,地區或縣)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': '層次三名稱(例如,城市/鄉鎮/村落)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': '層次四名稱(例如鄰里)',
'Hierarchy Level 5 Name': '階層層次五名稱',
'High Water': '高水位',
'High': '高',
'Hindi': '北印度文',
'Hindu': '印度教',
'History': '歷程',
'Hit the back button on your browser to try again.': '按瀏覽器的「上一頁」按鈕,然後再試一次。',
'Holiday Address': '假日位址',
'Home Address': '住家地址',
'Home Country': '住家所在國家或地區',
'Home Crime': '家庭犯罪',
'Home': '首頁',
'Hospital Details': '醫院詳細資料',
'Hospital Status Report': '醫院狀態報告',
'Hospital information added': '醫院資訊新增',
'Hospital information deleted': '醫院資訊刪除',
'Hospital information updated': '醫院資訊更新',
'Hospital status assessment.': '醫院狀態評量。',
'Hospital': '醫院',
'Hospitals': '醫院',
'Hot Spot': '熱點',
'Hour': '小時',
'Hourly': '每小時',
'Hours': '時數',
'Household kits received': '家庭套件接收',
'Household kits, source': '家庭套件,來源',
'How did boys 13-17yrs spend most of their time prior to the disaster?': '災難之前13到17歲的男孩是如何花費大部分的時間?',
'How did boys <12yrs spend most of their time prior to the disaster?': '災難之前小於12歲的男孩是如何花費大部分的時間?',
'How did boys girls 13-17yrs spend most of their time prior to the disaster?': '災難之前13到17歲的男孩女孩是如何花費大部分的時間?',
'How did girls <12yrs spend most of their time prior to the disaster?': '災難之前小於12歲的女孩是如何花費大部分的時間?',
'How do boys 13-17yrs spend most of their time now?': '13-17岁的男孩怎样利用他们大部分的時間?',
'How do boys <12yrs spend most of their time now?': '現在小於12歲的男孩是如何花費大部分的時間?',
'How do girls 13-17yrs spend most of their time now?': '13-17岁的女孩如何利用她们大部分的時間?',
'How do girls <12yrs spend most of their time now?': '少于12岁的女孩如何利用她们大部分的時間?',
'How does it work?': '運作方式?',
'How is this person affected by the disaster? (Select all that apply)': '此人如何受災難影響?(請選取所有適用項目)',
'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': '您需要多久才能到達可用的水資源?請指定步行往返所需的時間,包括排隊時間。',
'How long does it take you to walk to the health service?': '您需要多久才能走到健康服務處?',
'How long will the food last?': '食物能維持多久?',
'How long will this water resource last?': '此水資源能維持多久?',
'How many Boys (0-17 yrs) are Dead due to the crisis': '有多少男孩(0-17歲)因危機死亡',
'How many Boys (0-17 yrs) are Injured due to the crisis': '有多少男孩(0-17歲)因危機受傷',
'How many Boys (0-17 yrs) are Missing due to the crisis': '有多少男孩(0-17歲)因危機失蹤',
'How many Girls (0-17 yrs) are Dead due to the crisis': '有多少女孩(0-17歲)因危機死亡',
'How many Girls (0-17 yrs) are Injured due to the crisis': '有多少女孩(0-17歲)因危機受傷',
'How many Girls (0-17 yrs) are Missing due to the crisis': '有多少女孩(0-17歲)因危機失蹤',
'How many Men (18 yrs+) are Dead due to the crisis': '有多少男性(18歲以上)因危機死亡',
'How many Men (18 yrs+) are Injured due to the crisis': '有多少男性(18歲以上)因危機受傷',
'How many Men (18 yrs+) are Missing due to the crisis': '有多少男性(18歲以上)因危機失蹤',
'How many Women (18 yrs+) are Dead due to the crisis': '有多少女性(18歲以上)因危機死亡',
'How many Women (18 yrs+) are Injured due to the crisis': '有多少女性(18歲以上)因危機受傷',
'How many Women (18 yrs+) are Missing due to the crisis': '有多少女性(18歲以上)因危機失蹤',
'How many days will the supplies last?': '這些補給品能維持多少天?',
'How many doctors in the health centers are still actively working?': '健康中心還有多少醫師仍在工作?',
'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': '有多少房屋無法居住(無法居住=地基和結構被毀)?',
'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': '有多少房屋受損但仍可使用(可使用=窗戶破損、牆面龜裂、屋頂輕微受損)?',
'How many latrines are available in the village/IDP centre/Camp?': '村落/國內流離失所者中心/營地有多少廁所可用?',
'How many midwives in the health centers are still actively working?': '健康中心還有多少助產士仍在工作?',
'How many new cases have been admitted to this facility in the past 24h?': '過去24小時有多少新病例送入此設施?',
'How many nurses in the health centers are still actively working?': '健康中心還有多少護士仍在工作?',
'How many of the patients with the disease died in the past 24h at this facility?': '過去24小時在此設施有多少該疾病的病患死亡?',
'How many of the primary school age boys (6-12) in the area are not attending school?': '此地區有多少小學學齡男孩(6-12歲)未上學?',
'How many of the primary school age girls (6-12) in the area are not attending school?': '此地區有多少小學學齡女孩(6-12歲)未上學?',
'How many of the primary/secondary schools are now open and running a regular schedule of class?': '目前有多少小學/中學已開放並按正常課表上課?',
'How many of the secondary school age boys (13-18) in the area are not attending school?': '此地區有多少中學學齡男孩(13-18歲)未上學?',
'How many of the secondary school age girls (13-18) in the area are not attending school?': '此地區有多少中學學齡女孩(13-18歲)未上學?',
'How many patients with the disease are currently hospitalized at this facility?': '目前有多少該疾病的病患在此設施住院?',
'How many primary school age boys (6-12) are in the affected area?': '受影響地區有多少小學學齡男孩(6-12歲)?',
'How many primary school age girls (6-12) are in the affected area?': '受影響地區有多少小學學齡女孩(6-12歲)?',
'How many primary/secondary schools were opening prior to the disaster?': '災難前有多少所小學/中學開放?',
'How many secondary school age boys (13-18) are in the affected area?': '受影響地區有多少中學學齡男孩(13-18歲)?',
'How many secondary school age girls (13-18) are in the affected area?': '受影響地區有多少中學學齡女孩(13-18歲)?',
'How many teachers have been affected by the disaster (affected = unable to work)?': '有多少教師受災難影響(受影響=無法工作)?',
'How many teachers worked in the schools prior to the disaster?': '災難前有多少教師在學校工作?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': '可看到多少細節。高縮放層級表示細節很多,但範圍不廣;低縮放層級表示可看到廣大範圍,但細節不多。',
'Human Resource Details': '人力資源詳細資料',
'Human Resource Management': '人力資源管理',
'Human Resource added': '新增人力資源',
'Human Resource removed': '移除人力資源',
'Human Resource updated': '人力資源更新',
'Human Resource': '人力資源',
'Human Resources Management': '人力資源管理',
'Human Resources': '人力資源部',
'Hurricane Force Wind': '颶風級強風',
'Hurricane': '台風',
'Hygiene kits received': '已收到衛生套件',
'Hygiene kits, source': '衛生套件,來源',
'Hygiene practice': '衛生習慣',
'Hygiene problems': '衛生問題',
'I am available in the following area(s)': '我可在下列區域提供服務',
'ID Label': '識別編號或符號',
'ID Label:': '識別編號或符號:',
'ID Tag Number': 'ID標籤號碼',
'ID Tag': 'ID標籤',
'ID type': 'Id 類型',
'Ice Pressure': '冰壓',
'Iceberg': '冰山',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': '最好是原始檔的完整URL,否則僅註明資料來源即可。',
'Identification Report': '識別報告',
'Identification Reports': '識別報告',
'Identification Status': '識別狀態',
'Identification label of the Storage bin.': '儲物箱的識別標籤。',
'Identification': '識別',
'Identified as': '識別為',
'Identified by': '識別由',
'Identity Details': '身分詳細資料',
'Identity added': '新增身分',
'Identity deleted': '刪除身分',
'Identity updated': '更新身分',
'Identity': '身分',
'If Staff have login accounts then they are given access to edit the details of the': '如果人員有登入帳戶,他們將獲得存取權,可編輯以下項目的詳細資料:',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': '如果單位=m,基本單位=Km,則乘數為0.0001,因為1m=0.001km。',
'If a ticket was issued then please provide the Ticket ID.': '如果已開立票據,請提供票據ID。',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': '如果使用者驗證他們擁有此網域的電子郵件位址,核准者欄位將用來決定是否需要進一步核准以及由誰核准。',
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': '如果啟用,系統會記錄使用者存取的所有記錄。如果停用,仍可針對個別模組啟用。',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': '如果啟用,系統會記錄使用者編輯的所有記錄。如果停用,仍可針對個別模組啟用。',
'If it is a URL leading to HTML, then this will downloaded.': '如果它是指向HTML的URL,則會下載該內容。',
'If neither are defined, then the Default Marker is used.': '如果兩者皆未定義,則使用預設標記。',
'If no marker defined then the system default marker is used': '如果未定義標記,則使用系統預設標記',
'If no, specify why': '如果沒有,請指定原因',
'If none are selected, then all are searched.': '如果沒有選取,則所有搜尋。',
'If the location is a geographic area, then state at what level here.': '如果位置是地理區域,請在此註明其層級。',
'If the request type is "Other", please enter request details here.': '若要求的類型為"其他",請輸入此要求的詳細資料。',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': '如果填寫此欄位,具有指定網域的使用者將自動被指派為此組織的人員',
'If this is set to True then mails will be deleted from the server after downloading.': '如果設為True,郵件下載後將從伺服器中刪除。',
'If this record should be restricted then select which role is required to access the record here.': '如果此記錄應受限制,請在此選取存取此記錄所需的角色。',
'If this record should be restricted then select which role(s) are permitted to access the record here.': '如果此記錄應受限制,請在此選取允許存取此記錄的角色。',
'If yes, specify what and by whom': '如果為"是",指定什么和由誰',
'If yes, which and how': '如果為"是",以及如何',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': '如果您不輸入參考文件,將顯示您的電子郵件以便驗證此資料。',
'If you know what the Geonames ID of this location is then you can enter it here.': '如果您知道此位置的Geonames ID,可以在此輸入。',
'If you know what the OSM ID of this location is then you can enter it here.': '如果您知道此位置的OSM ID,可以在此輸入。',
'If you need to add a new document then you can click here to attach one.': '如果您需要新增文件,可以按一下這裡附加一份。',
'If you run multiple servers in a network, you would probably see this place listing some other machines. Sahana can automatically pick servers in your organization (if they have sync username and password of your machine or if it is set to default) and add them to your list of machines to perform synchronization with. You can modify individual sync policy for each server. You can also add username and password of that server to retrieve and send data to that server. You can also manually add other servers.': '如果您在網路中執行多部伺服器,這裡可能會列出其他機器。Sahana可以自動挑選您組織中的伺服器(如果它們擁有您機器的同步使用者名稱和密碼,或該設定為預設值),並將它們加入要進行同步的機器清單。您可以為每部伺服器修改個別的同步原則,也可以加入該伺服器的使用者名稱和密碼,以便向該伺服器擷取及傳送資料。您也可以手動加入其他伺服器。',
'If you want several values, then separate with': '如果您需要多個值,請以下列符號分隔',
'If you would like to help, then please': '如果您想要幫助,則請',
'Illegal Immigrant': '非法移民',
'Image Details': '映像明細',
'Image Tags': '影像標籤',
'Image Type': '映像檔類型',
'Image Upload': '上載影像',
'Image added': '新增影像',
'Image deleted': '刪除影像',
'Image updated': '更新影像',
'Image': '映像檔',
'Image/Attachment': '影像/附件',
'Image/Other Attachment': '影像/其他附件',
'Imagery': '影像',
'Images': '影像',
'Immediate reconstruction assistance, Rank': '立即重建援助,等級',
'Impact Assessments': '影響評量',
'Impact Details': '影響詳細資料',
'Impact Type Details': '影響類型詳細資料',
'Impact Type added': '已新增影響類型',
'Impact Type deleted': '已刪除影響類型',
'Impact Type updated': '已更新影響類型',
'Impact Type': '影響類型',
'Impact Types': '影響類型',
'Impact added': '已新增影響',
'Impact deleted': '已刪除影響',
'Impact updated': '已更新影響',
'Impacts': '影響',
'Import & Export Data': '匯入及匯出資料',
'Import Data': '匯入資料',
'Import Job': '匯入工作',
'Import Jobs': '匯入工作',
'Import and Export': '匯入及匯出',
'Import from Ushahidi Instance': '從Ushahidi實例匯入',
'Import if Master': '若為主要節點則匯入',
'Import job created': '已建立匯入工作',
'Import multiple tables as CSV': '以CSV格式匯入多個表格',
'Import': '匯入',
'Import/Export': '匯入/匯出',
'Import/Master': '匯入/主要',
'Important': '重要',
'Importantly where there are no aid services being provided': '尤其是沒有提供援助服務的地方',
'Imported': '已匯入',
'Importing data from spreadsheets': '從試算表匯入資料',
'Improper decontamination': '除污不當',
'Improper handling of dead bodies': '屍體處理不當',
'In Catalogs': '型錄中',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '在GeoServer中,這是層名稱。在WFS getCapabilities中,這是FeatureType名稱中冒號(:)之後的部分。',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '在GeoServer中,這是工作區名稱。在WFS getCapabilities中,這是FeatureType名稱中冒號(:)之前的部分。',
'In Inventories': '在庫存',
'In Process': '正在處理',
'In Progress': '進行中',
'In Transit': '轉移中',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': '在視窗佈局中,地圖會放大填滿視窗,因此不需要在此設定較大的值。',
'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': '一般而言,您社群中的年長者、身心障礙人士、兒童、青少年和婦女最大的需求是什麼?',
'Inbound Mail Settings': '入埠郵件設定',
'Incident Categories': '事件種類',
'Incident Details': '事件明細',
'Incident Report Details': '事故報告詳細資料',
'Incident Report added': '新增事件報告',
'Incident Report deleted': '刪除事故報告',
'Incident Report updated': '更新事故報告',
'Incident Report': '事件報告',
'Incident Reporting System': '事件報告系統',
'Incident Reporting': '事件報告',
'Incident Reports': '事件報告',
'Incident added': '新增事件',
'Incident deleted': '刪除事件',
'Incident updated': '更新事件',
'Incident': '發生事件',
'Incidents': '發生事件',
'Incoming Shipment canceled': '進入出貨取消',
'Incoming Shipment updated': '進入出貨更新',
'Incoming': '送入的',
'Incomplete': '未完成',
'Individuals': '個人',
'Industrial Crime': '工業犯罪',
'Industrial': '製造業',
'Industry Fire': '工業火災',
'Industry close to village/camp': '村落/營地附近的工業',
'Infant (0-1)': '嬰兒 (0-1)',
'Infectious Disease (Hazardous Material)': '傳染病(危險物料)',
'Infectious Disease': '傳染病',
'Infestation': '蟲害侵擾',
'Informal Leader': '非正式領導者',
'Informal camp': '非正式營地',
'Information gaps': '資訊間隙',
'Infusion catheters available': '可用的輸液導管',
'Infusion catheters need per 24h': '每24小時所需的輸液導管',
'Infusion catheters needed per 24h': '每24小時所需的輸液導管',
'Infusions available': '可用的輸液',
'Infusions needed per 24h': '每24小時所需的輸液',
'Injuries': '傷害',
'Input Job': '輸入工作',
'Inspected': '已檢驗',
'Inspection Date': '檢驗日期',
'Inspection date and time': '檢驗日期和時間',
'Inspection time': '檢驗時間',
'Inspector ID': '視察者 ID',
'Instance Type': '實例類型',
'Instance URL': '實例URL',
'Instant Porridge': '即食粥',
'Institution': '機構',
'Insufficient vars: Need module, resource, jresource, instance': '不足變數:需要模組,資源, jresource,實例',
'Insufficient': '不足',
'Intake Items': '接收項目',
'Intergovernmental Organization': '政府間組織',
'Interior walls, partitions': '內部牆壁,分割區',
'Internal Features': '內部功能',
'Internal State': '內部狀態',
'International NGO': '国際NGO',
'International Organization': '国際組織',
'International Staff': '国際人員',
'Intervention': '介入',
'Interview taking place at': '進行訪談在',
'Invalid Organization ID!': '無效組織標識!',
'Invalid Query': '無效的查詢',
'Invalid Request': '無效要求',
'Invalid UUID!': 'UUID無效!',
'Invalid email': '無效的電子郵件',
'Invalid request!': '要求無效!',
'Invalid ticket': '無效的票据',
'Invalid': '無效',
'Inventories with Item': '与庫存項目',
'Inventories with Items': '与庫存項目',
'Inventories': '庫存',
'Inventory Item Details': '庫存項目詳細資料',
'Inventory Item added': '添加庫存項目',
'Inventory Item deleted': '庫存項目已刪除',
'Inventory Item updated': '庫存項目更新',
'Inventory Item': '庫存項目',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': '庫存項目包括消耗性物資,以及會在目的地轉為資產的項目。',
'Inventory Items': '庫存項目',
'Inventory Location Details': '庫存位置詳細資料',
'Inventory Location added': '新增庫存位置',
'Inventory Location updated': '庫存位置更新',
'Inventory Location': '庫存位置',
'Inventory Locations': '庫存位置',
'Inventory Management': '庫存管理',
'Inventory Stock Position': '庫存位置',
'Inventory Store Details': '庫存倉庫詳細資料',
'Inventory Store added': '已新增庫存倉庫',
'Inventory Store deleted': '已刪除庫存倉庫',
'Inventory Store updated': '已更新庫存倉庫',
'Inventory Store': '庫存倉庫',
'Inventory Stores': '庫存倉庫',
'Inventory functionality is available for:': '庫存功能可用於:',
'Inventory of Effects': '個人物品清單',
'Inventory': '庫存',
'Inventory/Ledger': '庫存/分類账',
'Is adequate food and water available for these institutions?': '這些機構是否有足夠的食物和水?',
'Is editing level L%d locations allowed?': '是否允許編輯層次 L%d 的位置?',
'Is it safe to collect water?': '收集水是否安全?',
'Is there any industrial or agro-chemical production close to the affected area/village?': '受影響地區/村落附近是否有任何工業或農化生產?',
'Is this a strict hierarchy?': '這是一个嚴格階層?',
'Issuing Authority': '發出單位',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '它不僅擷取他們活動的地點,也擷取他們在各地區所提供之各類專案的資訊。',
'It gives four options: No Sync, Newer Timestamp, Keep All, Replace All': '它提供四个選項:沒有同步,新的時間戳記,保留所有,請更換所有',
'It is built using the Template agreed by a group of NGOs working together as the': '它使用由一群共同合作的 NGO 所認可的範本建置,這群 NGO 即',
'It is suggested to open the 2 locations into new tabs so that it can be decided which is the best one to keep out of the 2.': '建議開啟這二个位置中新的標籤,以决定保留最好的一个。',
'Item Added to Shipment': '新增項目至出貨',
'Item Catalog Categories': '項目型錄種類',
'Item Catalog Category Details': '項目型錄種類詳細資料',
'Item Catalog Category added': '型錄項目新增種類',
'Item Catalog Category deleted': '項目刪除型錄種類',
'Item Catalog Category updated': '項目型錄種類更新',
'Item Catalog Category': '項目型錄種類',
'Item Catalog Details': '型錄項目詳細資料',
'Item Catalog added': '型錄項目新增',
'Item Catalog deleted': '型錄項目刪除',
'Item Catalog updated': '型錄項目更新',
'Item Catalogs': '型錄項目',
'Item Categories': '項目種類',
'Item Category Details': '項目種類明細',
'Item Category added': '項目新增種類',
'Item Category deleted': '刪除項目種類',
'Item Category updated': '更新項目種類',
'Item Category': '項目種類',
'Item Details': '項目明細',
'Item Pack Details': '項目套件詳細資料',
'Item Pack added': '項目套件新增',
'Item Pack deleted': '項目套件刪除',
'Item Pack updated': '項目更新套件',
'Item Packs': '項目套件',
'Item Sub-Categories': '項目子種類',
'Item Sub-Category Details': '項目子種類明細',
'Item Sub-Category added': '項目子新增種類',
'Item Sub-Category deleted': '項目子類別刪除',
'Item Sub-Category updated': '項目子類別更新',
'Item Sub-Category': '項目子種類',
'Item added to Inventory': '項目新增至庫存',
'Item added to shipment': '新增項目至出貨',
'Item added': '已新增項目',
'Item already in Bundle!': '項目已在組合中!',
'Item already in Kit!': '項目已在套件!',
'Item already in budget!': '項目已在預算!',
'Item deleted': '已刪除項目',
'Item removed from Inventory': '從庫存移除的項目',
'Item updated': '更新項目',
'Item': '項目',
'Items in Category can be Assets': '此類別中的項目可以是資產',
'Items': '項目',
'Japanese': '日文',
'Jerry can': '儲水罐',
'Jew': '猶太人',
'Job Role Catalog': '工作角色型錄',
'Job Role Details': '工作角色詳細資料',
'Job Role added': '工作角色新增',
'Job Role deleted': '工作角色刪除',
'Job Role updated': '工作角色更新',
'Job Role': '職位',
'Job Roles': '職務',
'Job Title': '工作職稱',
'Jobs': '工作',
'Journal Entry Details': '日誌項目詳細資料',
'Journal entry added': '新增日誌項目',
'Journal entry deleted': '日誌項目刪除',
'Journal entry updated': '日誌項目更新',
'Journal': '日誌',
'Just Once': '只要一次',
'KPIs': 'KPI',
'Keep All': '全部保留',
'Keep Local': '保持局部',
'Key Details': '索引鍵詳細資料',
'Key added': '新增金鑰',
'Key deleted': '鍵刪除',
'Key updated': '更新金鑰',
'Key': '金鑰',
'Keys': '索引鍵',
'Kit Contents': '套件內容',
'Kit Details': '套件明細',
'Kit Updated': '更新套件',
'Kit added': '新增套件',
'Kit deleted': '刪除套件',
'Kit updated': '更新套件',
'Kit': '套件',
'Kits': '配套',
'Known Identities': '已知身分',
'Known incidents of violence against women/girls': '已知事件的暴力對婦女/女孩',
'Known incidents of violence since disaster': '已知事件的暴力因為災難',
'LICENCE': '授權',
'LICENSE': '軟體使用權',
'LMS Administration': 'LMS管理',
'Label': '標籤(Label)',
'Lack of material': '缺乏物料',
'Lack of school uniform': '缺乏學校制服',
'Lack of supplies at school': '學校缺乏用品',
'Lack of transport to school': '缺乏上學的交通工具',
'Lactating women': '哺乳期婦女',
'Landmark Details': '里程碑詳細資料',
'Landmark added': '新增里程碑',
'Landmark deleted': '刪除里程碑',
'Landmark updated': '里程碑更新',
'Landmarks': '里程碑',
'Language': '語言',
'Last Name': '姓',
'Last known location': '前次已知位置',
'Last name': '姓',
'Last synchronization on': '上次同步化',
'Last synchronization time': '上次同步化時間',
'Last updated by': '上次更新的人',
'Last updated on': '上次更新於',
'Last updated': '前次更新',
'Latitude & Longitude': '緯度和經度',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度是南北方向(上下)。緯度在赤道為零,北半球為正,南半球為負。',
'Latitude is North-South (Up-Down).': '緯度是南北方向(上下)。',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度在赤道為零,北半球為正,南半球為負。',
'Latitude of Map Center': '緯度的對映中心',
'Latitude of far northern end of the region of interest.': '相關區域最北端的緯度。',
'Latitude of far southern end of the region of interest.': '相關區域最南端的緯度。',
'Latitude should be between': '緯度必須介於',
'Latitude': '緯度',
'Law enforcement, military, homeland and local/private security': '執法、軍事、國土及地方/私人保安',
'Layer Details': '層詳細資料',
'Layer added': '新增層',
'Layer deleted': '刪除層',
'Layer updated': '更新層',
'Layer': '層',
'Layers updated': '層更新',
'Layers': '層',
'Layout': '配置',
'Leader': '領導人',
'Left-to-Right': '由左至右',
'Legend Format': '圖例格式',
'Length (m)': '長度(M)',
'Length': '長度',
'Level 1 Assessment Details': '層次一評量詳細資料',
'Level 1 Assessment added': '層次一評量新增',
'Level 1 Assessment deleted': '層次一評量刪除',
'Level 1 Assessment updated': '層次一評量更新',
'Level 1 Assessments': '層次一評量',
'Level 1': '層次 1',
'Level 2 Assessment Details': '層次二評量詳細資料',
'Level 2 Assessment added': '層次二評量新增',
'Level 2 Assessment deleted': '層次二評量刪除',
'Level 2 Assessment updated': '層次二評量更新',
'Level 2 Assessments': '層次二評量',
'Level 2 or detailed engineering evaluation recommended': '層次二或詳細工程評估建議',
'Level 2': '層次 2',
'Level': '層次',
'Library support not available for OpenID': '沒有可用的 OpenID 程式庫支援',
'Line': '明細行',
'LineString': '線串',
'Link Item & Shipment': '鏈結項目&出貨',
'Link an Item & Shipment': '鏈結項目与出貨',
'Linked Records': '鏈結記錄',
'Linked records': '鏈結記錄',
'List / Add Baseline Types': '清單/新增基準线類型',
'List / Add Impact Types': '清單/新增影响類型',
'List / Add Services': '清單/新增服務',
'List / Add Types': '清單/新增類型',
'List Activities': '列出活動',
'List Aid Requests': '需求列表',
'List All Assets': '所有資產清單',
'List All Catalog Items': '列出所有型錄項目',
'List All Commitments': '列出所有承諾',
'List All Entries': '所有項目清單',
'List All Item Categories': '列出所有項目種類',
'List All Memberships': '顯示所有組員',
'List All Received Shipments': '列出所有接收出貨',
'List All Records': '所有記錄清單',
'List All Reports': '列示全部報告',
'List All Requested Items': '列出所有要求的項目',
'List All Requests': '所有要求清單',
'List All Sent Shipments': '列出所有传送出貨',
'List All': '列示全部',
'List Alternative Items': '替代清單項目',
'List Assessment Summaries': '清單評量摘要',
'List Assessments': '評量清單',
'List Asset Assignments': '列示資產分派',
'List Assets': '列出資產',
'List Availability': '清單可用性',
'List Baseline Types': '列舉基準線類型',
'List Baselines': '列舉基準線',
'List Brands': '列舉品牌',
'List Budgets': '列舉預算',
'List Bundles': '列舉捆綁',
'List Camp Services': '營地服務清單',
'List Camp Types': '營地類型清單',
'List Camps': '營地清單',
'List Catalog Items': '列舉目錄項目',
'List Catalogs': '目錄清單',
'List Category<>Sub-Category<>Catalog Relation': '清單Category<>Sub-Category<>Catalog關系',
'List Certificates': '凭證清單',
'List Certifications': '認證清單',
'List Checklists': '核對清單',
'List Cluster Subsectors': '叢集子部門清單',
'List Clusters': '叢集清單',
'List Commitment Items': '承諾項目清單',
'List Commitments': '承諾清單',
'List Competencies': '清單能力',
'List Competency Ratings': '清單能力等級',
'List Configs': '配置清單',
'List Conflicts': '冲突清單',
'List Contact Information': '聯絡資訊清單',
'List Contacts': '列出聯絡人',
'List Course Certicates': '清單進程凭證',
'List Courses': '課程清單',
'List Credentials': '認證清單',
'List Current': '現行清單',
'List Distribution Items': '配送清單項目',
'List Distributions': '配送清單',
'List Documents': '清單文件',
'List Donors': '捐贈者清單',
'List Events': '事件清單',
'List Facilities': '設備清單',
'List Feature Layers': '清單功能層',
'List Flood Reports': '水災清單報告',
'List Groups': '顯示群組',
'List Groups/View Members': '列示群組成員/檢視',
'List Hospitals': '醫院清單',
'List Human Resources': '人力資源清單',
'List Identities': '身分清單',
'List Images': '影像清單',
'List Impact Assessments': '清單影响評量',
'List Impact Types': '影响清單類型',
'List Impacts': '影响清單',
'List Incident Reports': '事件報告清單',
'List Incidents': '事件清單',
'List Inventory Items': '庫存項目清單',
'List Inventory Locations': '清單庫存位置',
'List Inventory Stores': '清單儲存庫存',
'List Item Catalog Categories': '型錄種類清單項目',
'List Item Catalogs': '清單項目型錄',
'List Item Categories': '項目種類清單',
'List Item Packs': '清單項目套件',
'List Item Sub-Categories': '清單項目子種類',
'List Items in Inventory': '清單中項目的庫存',
'List Items': '清單項目',
'List Job Roles': '列出工作角色',
'List Keys': '列出金鑰',
'List Kits': '套件清單',
'List Landmarks': '清單里程碑',
'List Layers': '層清單',
'List Level 1 Assessments': '清單層次一評量',
'List Level 1 assessments': '清單層次一評量',
'List Level 2 Assessments': '清單層次二評量',
'List Level 2 assessments': '清單層次二評量',
'List Locations': '列示位置',
'List Log Entries': '日誌項目清單',
'List Map Profiles': '對映清單配置',
'List Markers': '標記清單',
'List Members': '列示成員',
'List Memberships': '成員資格清單',
'List Messages': '列出訊息',
'List Metadata': 'meta資料清單',
'List Missing Persons': '失蹤災民列表',
'List Missions': '列出任務清單',
'List Need Types': '清單需要類型',
'List Needs': '需求清單',
'List Notes': '清單附註',
'List Offices': '辦公室清單',
'List Organizations': '組織清單',
'List Partners': '伙伴清單',
'List Peers': '對等清單',
'List Personal Effects': '個人物品清單',
'List Persons': '人員清單',
'List Photos': '清單照片',
'List Population Statistics': '列出人口統計資料',
'List Positions': '位置清單',
'List Problems': '問題清單',
'List Projections': '預測清單',
'List Projects': '專案清單',
'List Rapid Assessments': '快速清單評量',
'List Received Items': '清單接收項目',
'List Received Shipments': '清單收到出貨',
'List Records': '記錄清單',
'List Registrations': '登錄清單',
'List Relatives': '關係列表',
'List Relief Items': '救援物資清單',
'List Reports': '清單報告',
'List Request Items': '要求清單項目',
'List Requested Skills': '需求技能列表',
'List Requests': '要求清單',
'List Resources': '列出資源',
'List Responses': '清單回應',
'List Rivers': '河流清單',
'List Roles': '列出角色',
'List Rooms': '列出會談室清單',
'List Scenarios': '清單實務',
'List School Districts': '清單學校行政區',
'List School Reports': '學校清單報告',
'List Sections': '清單區段',
'List Sectors': '部門清單',
'List Sent Items': '传送的項目清單',
'List Sent Shipments': '列出貨清單',
'List Service Profiles': '服務設定檔清單',
'List Settings': '清單設定',
'List Shelter Services': '列表庇護服務',
'List Shelter Types': '列表收容所類型',
'List Shelters': '列表收容所',
'List Shipment Transit Logs': '列表過境貨物日誌',
'List Shipment/Way Bills': '清單出貨/方式账單',
'List Shipment<>Item Relation': '列表運費<>物品關係',
'List Shipments': '列表裝運',
'List Sites': '站點清單',
'List Skill Equivalences': '技能清單同等',
'List Skill Provisions': '技能清單條款',
'List Skill Types': '技能清單類型',
'List Skill': '技能清單',
'List Skills': '技能清單',
'List Solutions': '解决方案清單',
'List Sources': '來源清單',
'List Staff Types': '人員清單類型',
'List Staff': '列出人員清單',
'List Status': '清單狀態',
'List Storage Bin Type(s)': '儲存箱類型清單',
'List Storage Bins': '儲存箱清單',
'List Storage Location': '儲存位置清單',
'List Subscriptions': '清單訂閱',
'List Subsectors': '子部門清單',
'List Support Requests': '清單支援要求',
'List Survey Answers': '清單調查答案',
'List Survey Questions': '調查問題清單',
'List Survey Sections': '清單調查區段',
'List Survey Series': '清單調查系列',
'List Survey Templates': '清單調查范本',
'List Tasks': '列出作業',
'List Teams': '小組清單',
'List Themes': '布景主題清單',
'List Tickets': '問題單清單',
'List Tracks': '追蹤清單',
'List Trainings': '訓練清單',
'List Units': '單位清單',
'List Updates': '更新清單',
'List Users': '列出使用者',
'List Vehicle Details': '交通工具資料',
'List Vehicles': '車輛清單',
'List Volunteers': '志愿者清單',
'List Warehouses': '倉庫清單',
'List all': '列示全部',
'List available Scenarios': '列出可用的實務范例',
'List of CSV files uploaded': '已上傳CSV 檔案列表',
'List of CSV files': 'CSV 檔案列表',
'List of Items': '項目清單',
'List of Missing Persons': '失蹤人員清單',
'List of Peers': '對等清單',
'List of Reports': '報告清單',
'List of Requests': '要求清單',
'List of Spreadsheets uploaded': '已上傳的試算表清單',
'List of Spreadsheets': '試算表清單',
'List of Volunteers for this skill set': '具備此技能的志願者清單',
'List of Volunteers': '志願者清單',
'List of addresses': '地址清單',
'List unidentified': '未識別者清單',
'List': '清單',
'List/Add': '顯示/新增群組',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': '列出"誰正在做什么& "where"。可釋放機构來协調它們的活動',
'Live Help': '即時說明',
'Livelihood': '生計',
'Load Cleaned Data into Database': '加載已清理的數據到數據庫',
'Load Details': '載入詳細資料',
'Load Raw File into Grid': '載入原始檔案到網格',
'Load the details to help decide which is the best one to keep out of the 2.': '載入詳細資料,以協助判斷兩者之中最適合保留哪一個。',
'Loading Locations...': '正在載入位置...',
'Loading': '載入中',
'Local Language': '本地語言',
'Local Name': '當地名稱',
'Local Names': '當地名稱',
'Location 1': '位置一',
'Location 2': '位置二',
'Location De-duplicated': '位置取消重复',
'Location Details': '地點明細',
'Location Hierarchy Level 0 Name': '位置階層層次〇名稱',
'Location Hierarchy Level 1 Name': '位置階層層次一名稱',
'Location Hierarchy Level 2 Name': '位置階層層次二名稱',
'Location Hierarchy Level 3 Name': '位置階層層次三名稱',
'Location Hierarchy Level 4 Name': '位置階層層次四名稱',
'Location Hierarchy Level 5 Name': '位置階層層次五名稱',
'Location added': '新增位置',
'Location cannot be converted into a group.': '位置不能轉換成一个群組。',
'Location deleted': '位置已移除',
'Location details': '地點明細',
'Location group cannot be a parent.': '位置群組不能是母項。',
'Location group cannot have a parent.': '位置群組不能有一个母項。',
'Location groups can be used in the Regions menu.': '位置群組可用于區域的功能表。',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': '位置群組可能用來過濾顯示的內容在地圖上和在搜尋結果中只能實体所涵蓋的位置群組。',
'Location updated': '更新位置',
'Location': '地點',
'Location: ': '地點: ',
'Location:': '位置:',
'Locations De-duplicator': '位置去重工具',
'Locations of this level need to have a parent of level': '此層次的位置需要有母項,其層次為',
'Locations should be different!': '位置應該不同!',
'Locations': '位置',
'Lockdown': '鎖定',
'Log Entry Details': '日誌項目詳細資料',
'Log entry added': '新增日誌項目',
'Log entry deleted': '刪除日誌',
'Log entry updated': '日誌項目更新',
'Log': '日誌',
'Logged in': '已登入',
'Logged out': '登出',
'Login': '登入',
'Logistics Management System': '物流管理系统',
'Logistics Management': '管理物流',
'Logistics': '物流',
'Logo file %s missing!': 'Logo 檔案 %s 遺失!',
'Logo': '標誌',
'Logout': '登出',
'Long Text': '長文字',
'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': '經度是東西方向(左右)。緯度是南北方向(上下)。緯度在赤道為零,北半球為正,南半球為負。經度在本初子午線(格林威治標準時間)為零,向東橫跨歐洲和亞洲為正;向西橫跨大西洋和美洲為負。這些值需以十進位度數輸入。',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '經度是東西方向(左右)。經度在本初子午線(格林威治標準時間)為零,向東橫跨歐洲和亞洲為正;向西橫跨大西洋和美洲為負。',
'Longitude is West - East (sideways).': '經度是東西方向(左右)。',
'Longitude is West-East (sideways).': '經度是東西方向(左右)。',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '經度在本初子午線(格林威治標準時間)為零,向東橫跨歐洲和亞洲為正;向西橫跨大西洋和美洲為負。',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '經度在本初子午線(通過英國格林威治)為零,向東橫跨歐洲和亞洲為正;向西橫跨大西洋和美洲為負。',
'Longitude of Map Center': '經度的對映中心',
'Longitude of far eastern end of the region of interest.': '相關區域最東端的經度。',
'Longitude of far western end of the region of interest.': '相關區域最西端的經度。',
'Longitude should be between': '經度必須介於',
'Longitude': '經度',
'Lost Password': '忘記密碼',
'Lost': '遺失',
'Low': '低',
'Machine with which data was exchanged.': '與其交換資料的機器。',
'Magnetic Storm': '磁暴',
'Main cash source': '主要現金來源',
'Main income sources before disaster': '災前主要收入來源',
'Major Damage': '主要損壞',
'Major expenses': '主要費用',
'Major outward damage': '主要外部損壞',
'Make Commitment': '做出承諾',
'Make New Commitment': '做出新承諾',
'Make Pledge': '做出認捐',
'Make Request': '提出要求',
'Make a Request for Aid': '提出援助要求',
'Make a Request': '提出要求',
'Make a request': '提出要求',
'Make preparations per the <instruction>': '依照<instruction>進行準備',
'Male': '男性',
'Malnutrition present prior to disaster': '災前即存在營養不良',
'Manage Category': '管理種類',
'Manage Events': '管理事件',
'Manage Item catalog': '管理項目型錄',
'Manage Kits': '管理套件',
'Manage Relief Item Catalogue': '管理救援物資型錄',
'Manage Sub-Category': '管理子種類',
'Manage Users & Roles': '管理使用者角色',
'Manage Warehouses/Sites': '管理倉庫/站點',
'Manage Your Facilities': '管理您的設備',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': '管理物資、資產、人員或其他資源的要求。與存有所需物資的庫存進行比對。',
'Manage requests of hospitals for assistance.': '管理醫院的協助要求。',
'Manage volunteers by capturing their skills, availability and allocation': '透過記錄技能、可用性與調度來管理志願者',
'Manage': '管理',
'Manager': '管理者',
'Managing Office': '辦公室管理',
'Managing, Storing and Distributing Relief Items': '管理、儲存及發放救援物資',
'Managing, Storing and Distributing Relief Items.': '管理、儲存及發放救援物資。',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '必要。在 GeoServer 中,這是層名稱。在 WFS getCapabilities 中,這是 FeatureType 名稱中冒號(:)之後的部分。',
'Mandatory. The URL to access the service.': '必要。用於存取服務的 URL。',
'Manual Synchronization': '手動同步化',
'Manual': '手動',
'Many': '許多',
'Map Center Latitude': '對映中心緯度',
'Map Center Longitude': '對映中心經度',
'Map Profile Details': '對映配置詳細資料',
'Map Profile added': '新增對映配置',
'Map Profile deleted': '刪除對映配置',
'Map Profile removed': '移除對映配置',
'Map Profile updated': '對映配置更新',
'Map Profile': '對映配置',
'Map Profiles': '對映配置',
'Map Height': '對映高度',
'Map Service Catalogue': '地圖服務型錄',
'Map Settings': '對映設定',
'Map Viewing Client': '檢視用户端對映',
'Map Width': '地圖寬度',
'Map Zoom': '對映縮放',
'Map of Hospitals': '對映的醫院',
'Map': '地圖',
'Mapping': '地圖模組',
'Marine Security': '海運安全',
'Marital Status': '婚姻狀況',
'Marker Details': '標記詳細資料',
'Marker added': '新增標記',
'Marker deleted': '標記刪除',
'Marker updated': '更新標記',
'Marker': '標記',
'Markers': '標記',
'Master Message Log to process incoming reports & requests': '主要訊息日誌以處理進入的報告和要求',
'Master Message Log': '主要訊息日誌',
'Match Percentage': '符合百分比',
'Match Requests': '符合要求',
'Match percentage indicates the % match between these two records': '相符百分比表示這兩筆記錄之間的符合程度',
'Match?': '相符?',
'Matching Catalog Items': '相符的型錄項目',
'Matching Items': '相符的項目',
'Matching Records': '相符記錄',
'Matrix of Choices (Multiple Answers)': '矩陣的選項(多个答案)',
'Matrix of Choices (Only one answer)': '矩陣的選項(只能有一个答案)',
'Matrix of Text Fields': '矩陣的文字欄位',
'Max Persons per Dwelling': '每住宅最大人數',
'Maximum Location Latitude': '最大位置緯度',
'Maximum Location Longitude': '最大位置經度',
'Maximum Weight': '最大重量',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': '儲存位置的最大承重能力,然後從下拉清單中選擇單位。',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': '儲存箱可容納項目的最大重量,然後從下拉清單中選擇單位。',
'Measure Area: Click the points around the polygon & end with a double-click': '測量面積:點擊多邊形周圍的點和雙擊結束',
'Measure Length: Click the points along the path & end with a double-click': '測量長度:沿路徑點擊各點,並以雙擊結束',
'Measures': '測量',
'Medical Attention': '醫學注意',
'Medical Staff': '醫療人員',
'Medical Supplies': '醫療補給品',
'Medical and public health': '醫療及公共健康',
'Medicine': '醫藥',
'Medium': '中',
'Megabytes per Month': '每月(MB)',
'Member removed from Group': '組員已刪除',
'Members': '成員',
'Membership Details': '組員內容',
'Membership updated': '組員已更新',
'Membership': '成員資格',
'Memberships': '群組設定',
'Mental': '心理',
'Message Details': '訊息詳細資料',
'Message Variable': '訊息變數',
'Message added': '新增訊息',
'Message deleted': '訊息已刪除',
'Message sent to outbox': '訊息传送至寄件匣',
'Message updated': '更新訊息',
'Message variable': '訊息變數',
'Message': '訊息',
'Messages': '訊息',
'Messaging settings updated': '传訊設定更新',
'Messaging': '傳訊模組',
'Metadata Details': 'Meta 資料的詳細資料',
'Metadata added': '新增meta資料',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': 'meta資料可以提供來套用至所有上传的照片, (如果想要)。',
'Metadata deleted': '刪除meta資料',
'Metadata updated': '更新meta資料',
'Metadata': 'meta 資料 (metadata)',
'Meteorological (inc. flood)': '氣象(含水災)',
'Method used': '使用方法',
'Micronutrient malnutrition prior to disaster': '災前微量營養素營養不良',
'Middle Name': '中間名',
'Migrants or ethnic minorities': '移民或少數族裔',
'Military': '軍事',
'Minimum Bounding Box': '最小外框',
'Minimum Location Latitude': '最小位置緯度',
'Minimum Location Longitude': '最小位置經度',
'Minimum shift time is 6 hours': '最短輪班時間為六小時',
'Minor Damage': '次要損壞',
'Minor/None': '次要/無',
'Minorities participating in coping activities': '參與應變活動的少數族群',
'Minute': '分鐘',
'Minutes must be a number between 0 and 60': '分鐘必须是〇和60之間的數字',
'Minutes must be between 0 and 60': '分鐘必须在〇和60之間',
'Minutes per Month': '分鐘每月',
'Minutes should be a number greater than 0 and less than 60': '分鐘應該是一个數字大于〇且小于60',
'Minutes should be greater than 0 and less than 60': '分鐘應該大于〇且小于60',
'Miscellaneous': '雜項',
'Missing Person Details': '失蹤人員詳細資料',
'Missing Person Registry': '失蹤人員登記處',
'Missing Person Reports': '失蹤人員報告',
'Missing Person': '失蹤人員',
'Missing Persons Registry': '失蹤人員登記處',
'Missing Persons Report': '失蹤人員報告',
'Missing Persons': '失蹤人員',
'Missing Report': '失蹤報告',
'Missing Senior Citizen': '失蹤的年長市民',
'Missing Vulnerable Person': '失蹤的弱勢人員',
'Missing': '失蹤',
'Mission Details': '任務詳細資料',
'Mission Record': '任務記錄',
'Mission added': '添加任務',
'Mission deleted': '刪除任務',
'Mission updated': '更新任務',
'Missions': '任務',
'Mobile Assess.': '行動評定。',
'Mobile Basic Assessment': '行動基本評量',
'Mobile Phone': '行動電話',
'Mobile': '行動電話',
'Mode': '模式',
'Model/Type': '型號/類型',
'Modem Settings': '數據機設定',
'Modem settings updated': '數据機設定更新',
'Modem': '數據機',
'Moderate': '普通',
'Moderator': '主持人',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': '修改圖徵:選取您要變形的圖徵,然後拖曳其中一個點,以您選擇的方式變形該圖徵',
'Modify Information on groups and individuals': '修改群組和個人的相關資訊',
'Modifying data in spreadsheet before importing it to the database': '在匯入資料庫之前於試算表中修改資料',
'Module Administration': '模組管理',
'Module disabled!': '模組已停用!',
'Module provides access to information on current Flood Levels.': '本模組可讓您存取目前水位的資訊。',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': '本模組儲存由專業組織完成的結構化報告,目前資料包括 WFP 評估。',
'Module': '模組',
'Monday': '星期一',
'Monthly Cost': '每月成本',
'Monthly Salary': '每月薪資',
'Months': '月數',
'More about OpenID': '更多關於 OpenID',
'Morgue Status': '停屍間狀態',
'Morgue Units Available': '可用的停屍間單位',
'Motorcycle': '摩托車',
'Move Feature: Drag feature to desired location': '移動功能:拖曳功能所需的位置',
'Movements (Filter In/Out/Lost)': '移動過濾器(輸入/輸出/遺失)',
'MultiPolygon': '多個多邊形',
'Multiple Choice (Multiple Answers)': '多个選項(多个答案)',
'Multiple Choice (Only One Answer)': '多个選項(僅一回答)',
'Multiple Matches': '多個相符的項目',
'Multiple Text Fields': '多个文字欄位',
'Multiple': '多個',
'Muslim': '回教',
'Must a location have a parent location?': '位置是否必須有母項位置?',
'My Current function': '我的目前職務',
'My Tasks': '我的任務',
'My Volunteering': '我的志工任務',
'N/A': '不適用',
'NO': '否',
'NZSEE Level 1': 'NZSEE層次一',
'NZSEE Level 2': 'NZSEE層次二',
'Name and/or ID Label': '名稱和/或識別號碼標籤',
'Name and/or ID': '名稱和/或識別號碼',
'Name of School': '學校的名稱',
'Name of Storage Bin Type.': '存儲箱類型名稱。',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': '位於 static 中的檔案名稱(及選用子路徑),用於頁首背景。',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': '位於 static 中的檔案名稱(及選用子路徑),用於左上方影像。',
'Name of the file (& optional sub-path) located in views which should be used for footer.': '位於 views 中的檔案名稱(及選用子路徑),用於頁尾。',
'Name of the person in local language and script (optional).': '以當地語言及文字表示的人員名稱(選用)。',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': '本報告所指之單位或部門的名稱。如果您的醫院沒有分部,請留空。',
'Name': '名稱',
'Name, Org and/or ID': '名稱,組織及/或ID',
'Name/Model/Type': '名稱/模型/類型',
'Names can be added in multiple languages': '名稱可以添加多語言',
'National ID Card': '國民身分證',
'National NGO': '國家級 NGO',
'National Staff': '本國人員',
'National': '國家',
'Nationality of the person.': '聯絡人的國籍.',
'Nationality': '國籍',
'Nautical Accident': '海事意外',
'Nautical Hijacking': '海上劫持',
'Need Type Details': '需要類型詳細資料',
'Need Type added': '需要新增類型',
'Need Type deleted': '需要刪除類型',
'Need Type updated': '需要更新類型',
'Need Type': '需要類型',
'Need Types': '需要類型',
'Need added': '需要新增',
'Need deleted': '需要刪除',
'Need to be logged-in to be able to submit assessments': '需要登入,才能提交評量',
'Need to configure Twitter Authentication': '需要配置Twitter鉴別',
'Need to select 2 Locations': '需要選取二个位置',
'Need to specify a Budget!': '需要指定的預算!',
'Need to specify a Kit!': '需要指定一个套件!',
'Need to specify a Resource!': '必须指定資源!',
'Need to specify a bundle!': '需要指定軟体組!',
'Need to specify a feature group!': '需要指定一項特性群組!',
'Need to specify a group!': '需要指定"群組"!',
'Need to specify a location to search for.': '需要指定一个位置來搜尋。',
'Need to specify a role!': '需要指定一个角色!',
'Need to specify a table!': '需要指定一个表格!',
'Need to specify a user!': '需要指定一个使用者!',
'Need updated': '需要更新',
'Needs Details': '需求詳細資料',
'Needs Maintenance': '需要維護',
'Needs to reduce vulnerability to violence': '減少遭受暴力風險的需求',
'Needs': '需要',
'Negative Flow Isolation': '負壓隔離',
'Neighborhood': '鄰里',
'Neighbourhood': '鄰里',
'Neighbouring building hazard': '鄰近建築危害',
'Network': '網路',
'Neurology': '神經內科',
'New Assessment reported from': '新的評量報告從',
'New Certificate': '新建憑證',
'New Checklist': '新核對清單',
'New Entry': '新建文章',
'New Event': '新建事件',
'New Item Category': '新項目種類',
'New Job Role': '新工作角色',
'New Location Group': '新位置群組',
'New Location': '新位置',
'New Patient': '新病患',
'New Peer': '新的同層級',
'New Record': '新建記錄',
'New Report': '新建報告',
'New Request': '新要求',
'New Scenario': '新方案',
'New Skill': '新技能',
'New Solution Choice': '新解决方案選項',
'New Staff Member': '新人員成員',
'New Support Request': '新申請',
'New Synchronization Peer': '新的同層級同步化',
'New Team': '新團隊',
'New Training Course': '新的培訓課程',
'New Volunteer': '新志工',
'New cases in the past 24h': '過去24小時內的新病例',
'New': '新建',
'Newer Timestamp': '更新時間戳記',
'News': '新聞',
'Next View': '下一頁',
'Next': '下一頁(N)',
'No Activities Found': '沒有找到的活動',
'No Activities currently registered in this event': '在本事件中,並無已登錄的活動',
'No Addresses currently registered': '沒有位址目前登錄',
'No Aid Requests currently registered': '目前沒有任何已登錄的需求',
'No Alternative Items currently registered': '沒有替代項目目前登錄',
'No Assessment Summaries currently registered': '沒有評估目前已登錄摘要',
'No Assessments currently registered': '沒有評估目前登錄',
'No Asset Assignments currently registered': '沒有資產指派目前已登錄',
'No Assets currently registered in this event': '沒有資產目前登錄在此事件',
'No Assets currently registered in this scenario': '沒有資產目前已登錄在這个實務中',
'No Assets currently registered': '沒有資產目前已登錄',
'No Baseline Types currently registered': '沒有基準线類型目前登錄',
'No Baselines currently registered': '沒有基準线目前登錄',
'No Brands currently registered': '沒有品牌目前登錄',
'No Budgets currently registered': '目前沒有預算登錄',
'No Bundles currently registered': '目前沒有軟体組登錄',
'No Camp Services currently registered': '目前沒有登錄的營地服務',
'No Camp Types currently registered': '目前沒有登錄的營地類型',
'No Camps currently registered': '目前沒有登錄的營地',
'No Catalog Items currently registered': '沒有型錄項目目前已登錄',
'No Catalogs currently registered': '任何型錄目前已登錄',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'Category<>Sub-沒有Category<>Catalog關系目前已登錄',
'No Checklist available': '沒有可用的清單',
'No Cluster Subsectors currently registered': '目前沒有登錄的叢集子部門',
'No Clusters currently registered': '沒有"叢集目前登錄',
'No Commitment Items currently registered': '不確定項目目前登錄',
'No Commitments': '沒有承諾',
'No Configs currently defined': '沒有配置目前定义',
'No Credentials currently set': '目前沒有認證設定',
'No Details currently registered': '沒有詳細資料目前已登錄',
'No Distribution Items currently registered': '沒有分配項目目前登錄',
'No Distributions currently registered': '沒有當前發行版註冊',
'No Documents found': '找不到文件',
'No Donors currently registered': '目前沒有登錄的捐贈者',
'No Events currently registered': '目前登錄任何事件',
'No Facilities currently registered in this event': '沒有設備目前登錄在此事件',
'No Facilities currently registered in this scenario': '沒有設備目前登錄在這个實務中',
'No Feature Layers currently defined': '沒有功能層目前定义',
'No Flood Reports currently registered': '沒有溢出報告目前正在登錄',
'No GPS data currently registered': '目前無GPS資料被登錄',
'No Groups currently defined': '目前沒有群組定义',
'No Groups currently registered': '目前沒有群組',
'No Hospitals currently registered': '沒有醫院目前登錄',
'No Human Resources currently registered in this event': '沒有人力資源目前已在這个事件',
'No Human Resources currently registered in this scenario': '沒有人力資源目前已登錄在這个實務中',
'No Identification Report Available': '沒有可用的識別報告',
'No Identities currently registered': '沒有目前登錄身分',
'No Image currently defined': '沒有映像檔目前定义',
'No Image': '沒有影像',
'No Images currently registered': '沒有影像目前登錄',
'No Impact Types currently registered': '沒有目前有登記的影響類型',
'No Impacts currently registered': '沒有影响目前已登錄',
'No Import Files currently uploaded': '目前無影響檔案被上傳',
'No Incident Reports currently registered': '目前没有事件報告記錄',
'No Incidents currently registered': '目前没有事件記錄',
'No Incoming Shipments': '沒有進貨',
'No Inventories currently have suitable alternative items in stock': '目前沒有合適的替代品尚有庫存',
'No Inventories currently have this item in stock': '目前沒有庫存存有此項目',
'No Inventory Items currently registered': '目前沒有登記的庫存項目',
'No Inventory Locations currently registered': '目前沒有登記的庫存地點',
'No Inventory Stores currently registered': '無庫存儲存目前已登錄',
'No Item Catalog Category currently registered': '沒有項目型錄種類目前已登錄',
'No Item Catalog currently registered': '沒有項目型錄目前已登錄',
'No Item Categories currently registered': '沒有項目種類目前登錄',
'No Item Packs currently registered': '沒有項目套件目前已登錄',
'No Item Sub-Category currently registered': '項目沒有子類別目前已登錄',
'No Item currently registered': '沒有項目目前已登錄',
'No Items currently registered in this Inventory': '沒有項目目前登錄在此資產',
'No Items currently registered': '沒有項目目前登錄',
'No Items currently requested': '沒有項目目前要求',
'No Keys currently defined': '目前未定义任何金鑰',
'No Kits currently registered': '套件沒有目前登錄',
'No Landmarks currently defined': '沒有目前定义里程碑',
'No Level 1 Assessments currently registered': '沒有層次一評量目前已登錄',
'No Level 2 Assessments currently registered': '沒有層次二評量目前已登錄',
'No Locations currently available': '目前可用的任何位置',
'No Locations currently registered': '任何位置目前登錄',
'No Map Profiles currently defined': '沒有對映配置目前定义',
'No Map Profiles currently registered in this event': '沒有對映配置目前登錄在此事件',
'No Map Profiles currently registered in this scenario': '沒有對映配置目前登錄在這个實務中',
'No Markers currently available': '沒有當前可用標記',
'No Match': '沒有相符的項目',
'No Matching Catalog Items': '沒有相符的型錄項目',
'No Matching Items': '沒有相符的項目',
'No Matching Records': '沒有相符的記錄',
'No Members currently registered': '沒有成員目前登錄',
'No Memberships currently defined': '沒有資格目前定义',
'No Messages currently in Outbox': '沒有訊息目前在寄件匣',
'No Metadata currently defined': '目前沒有meta資料定义',
'No Need Types currently registered': '不需要目前登錄類型',
'No Needs currently registered': '目前沒有登錄需要',
'No Offices currently registered': '沒有辦公室目前登錄',
'No Offices found!': '沒有辦公室找到!',
'No Organizations currently registered': '目前登錄任何組織',
'No Organizations registered!': '沒有組織登錄!',
'No Packs for Item': '此品項無包裝',
'No Partners currently registered': '沒有伙伴目前登錄',
'No Patients currently registered': '目前沒有病人登錄',
'No Peers currently registered': '沒有同層級目前登錄',
'No People currently committed': '目前無人承諾',
'No People currently registered in this camp': '此營地目前沒有登記的人員',
'No People currently registered in this shelter': '此收容所目前沒有登記的人員',
'No Persons currently registered': '沒有人員目前已登錄',
'No Persons currently reported missing': '目前沒有人員被通報失蹤',
'No Persons found': '沒有找到人員',
'No Photos found': '沒有找到照片',
'No Picture': '沒有圖片',
'No Population Statistics currently registered': '目前沒有登錄的人口統計資料',
'No Presence Log Entries currently registered': '不存在日誌目前已登錄',
'No Problems currently defined': '目前沒有問題定义',
'No Projections currently defined': '沒有估算目前定义',
'No Projects currently registered': '沒有專案目前已登錄',
'No Rapid Assessments currently registered': '沒有快速評估目前登錄',
'No Received Items currently registered': '沒有收到項目目前登錄',
'No Received Shipments': '沒有收到出貨',
'No Records currently available': '沒有記錄當前可用',
'No Records matching the query': '沒有符合查詢的記錄',
'No Reports currently registered': '沒有報告目前登錄',
'No Request Items currently registered': '項目目前沒有要求註冊',
'No Requests have been made yet': '沒有要求尚未完成',
'No Requests match this criteria': '沒有要求符合此準則',
'No Requests': '沒有要求',
'No Responses currently registered': '沒有回應目前已登錄',
'No Rivers currently registered': '目前沒有登錄的河流',
'No Roles currently defined': '目前未定义任何角色',
'No Rooms currently registered': '沒有會談室目前登錄',
'No Scenarios currently registered': '沒有目前登錄實務',
'No School Districts currently registered': '沒有學校行政區目前登錄',
'No School Reports currently registered': '沒有學校報告目前登錄',
'No Sections currently registered': '沒有區段目前登錄',
'No Sectors currently registered': '目前沒有已註冊部門',
'No Sent Items currently registered': '目前沒有已發送項目',
'No Sent Shipments': '沒有已發送貨物',
'No Settings currently defined': '目前沒有定義設置',
'No Shelter Services currently registered': '目前沒有註冊住房服務',
'No Shelter Types currently registered': '目前沒有登錄的收容所類型',
'No Shelters currently registered': '目前沒有註冊住房',
'No Shipment Transit Logs currently registered': '沒有出貨日誌传送目前已登錄',
'No Shipment/Way Bills currently registered': '沒有出貨/方式账單目前已登錄',
'No Shipment<>Item Relation currently registered': '沒有Shipment<>Item關系目前登錄',
'No Sites currently registered': '沒有站點目前登錄',
'No Skill Types currently set': '技能沒有類型目前設定',
'No Solutions currently defined': '沒有解决方案目前定义',
'No Sources currently registered': '沒有來源目前登錄',
'No Staff Types currently registered': '沒有人員類型目前登錄',
'No Staff currently registered': '沒有人員目前已登錄',
'No Storage Bin Type currently registered': '目前沒有登錄的儲存箱類型',
'No Storage Bins currently registered': '目前沒有登錄的儲存箱',
'No Storage Locations currently registered': '目前沒有登錄的儲存位置',
'No Subscription available': '沒有可用的訂閱',
'No Subsectors currently registered': '目前沒有已登錄界別分組',
'No Support Requests currently registered': '不支援要求目前已登錄',
'No Survey Answers currently entered.': '目前沒有已輸入調查答案。',
'No Survey Answers currently registered': '沒有意見調查答案目前登錄',
'No Survey Questions currently registered': '沒有調查問題目前登錄',
'No Survey Sections currently registered': '沒有調查區段目前登錄',
'No Survey Series currently registered': '沒有調查系列目前登錄',
'No Survey Template currently registered': '沒有調查范本目前已登錄',
'No Sync': '沒有同步',
'No Tasks currently registered in this event': '在此事件中,無任何登錄的任務',
'No Tasks currently registered in this scenario': '在此情境中,無任何登錄的任務',
'No Tasks with Location Data': '沒有作業位置資料',
'No Teams currently registered': '目前沒有已登錄團隊',
'No Themes currently defined': '沒有主題目前定义',
'No Tickets currently registered': '沒有單目前登錄',
'No Tracks currently available': '沒有追蹤目前可用',
'No Units currently registered': '單元沒有目前登錄',
'No Updates currently registered': '沒有更新目前已登錄',
'No Users currently registered': '沒有使用者目前已登錄',
'No Volunteers currently registered': '沒有目前志愿者註冊',
'No Warehouses currently registered': '沒有目前登錄倉庫',
'No access at all': '完全沒有存取權',
'No access to this record!': '無法存取此記錄!',
'No action recommended': '沒有建議的動作',
'No conflicts logged': '沒有冲突登入',
'No contact information available': '沒有可用的聯絡資訊',
'No contacts currently registered': '沒有聯絡人目前登錄',
'No data in this table - cannot create PDF!': '沒有此表格中的資料-無法建立PDF!',
'No databases in this application': '這个應用程式中任何資料庫',
'No dead body reports available': '沒有可用的遺體報告',
'No entries found': '找不到項目',
'No entries matching the query': '沒有符合查詢的項目',
'No entry available': '沒有可用的項目',
'No import jobs': '沒有匯入工作',
'No linked records': '沒有鏈結記錄',
'No location known for this person': '不知道此人的任何位置',
'No location known of this person.': '不知道此人的任何位置。',
'No locations found for members of this team': '找不到此小組成員的位置',
'No locations registered at this level': '此層次沒有登錄的位置',
'No log entries matching the query': '沒有符合查詢的日誌項目',
'No matching records found.': '沒有找到相符的記錄。',
'No messages in the system': '系统中沒有訊息',
'No notes available': '沒有可用的筆記',
'No of Families Settled in the Schools': '安置在學校的家庭數',
'No of Families to whom Food Items are Available': '可獲得食品項目的家庭數',
'No of Families to whom Hygiene is Available': '可獲得衛生用品的家庭數',
'No of Families to whom Non-Food Items are Available': '可獲得非食品項目的家庭數',
'No of Female Students (Primary To Higher Secondary) in the Total Affectees': '受災總人數中的女性學生(小學至高中)人數',
'No of Female Teachers & Other Govt Servants in the Total Affectees': '受災總人數中的女性教師及其他公務人員人數',
'No of Male Students (Primary To Higher Secondary) in the Total Affectees': '受災總人數中的男性學生(小學至高中)人數',
'No of Male Teachers & Other Govt Servants in the Total Affectees': '受災總人數中的男性教師及其他公務人員人數',
'No of Rooms Occupied By Flood Affectees': '被水災災民佔用的房間數',
'No peers currently registered': '沒有同層級目前登錄',
'No pending registrations found': '沒有擱置的登錄找到',
'No pending registrations matching the query': '沒有擱置符合查詢的登錄',
'No person record found for current user.': '沒有人員記錄找到的現行使用者。',
'No positions currently registered': '沒有位置目前登錄',
'No problem group defined yet': '沒有問題群組尚未定义',
'No records matching the query': '沒有符合查詢的記錄',
'No records to delete': '沒有要刪除的記錄',
'No recovery reports available': '回复沒有可用的報告',
'No report available.': '沒有可用的報告。',
'No reports available.': '沒有可用的報告。',
'No reports currently available': '目前沒有可用的報告',
'No requests currently registered': '沒有要求目前已登錄',
'No requests found': '找不到要求',
'No resources currently registered': '沒有資源目前已登錄',
'No resources currently reported': '沒有資源目前報告',
'No service profile available': '沒有可用的服務配置',
'No skills currently set': '任何技術目前設定',
'No staff or volunteers currently registered': '目前沒有已登錄人員或志願者',
'No status information available': '沒有可用的狀態資訊',
'No sync permitted!': '不允許同步!',
'No synchronization': '沒有同步化',
'No tasks currently assigned': '沒有任務被指派',
'No tasks currently registered': '沒有作業目前已登錄',
'No template found!': '沒有找到范本!',
'No units currently registered': '單元沒有目前登錄',
'No volunteer availability registered': '沒有已登錄志願者可用性',
'No volunteer information registered': '沒有主動資訊登錄',
'No': '否',
'Non-medical Staff': '非醫療工作人員',
'Non-structural Hazards': '非結構性危害',
'None (no such record)': '無(無記錄)',
'None': '無',
'Normal food sources disrupted': '正常食品來源中斷',
'Normal': '正常',
'Not Applicable': '不適用',
'Not Authorised!': '未获授權!',
'Not Possible': '不可能',
'Not Set': '未設定',
'Not Authorized': '未获授權',
'Not installed or incorrectly configured.': '未安裝或配置不正確。',
'Not supported': '不支援',
'Not yet a Member of any Group': '尚未加入任何群組',
'Note Details': '附註詳細資料',
'Note Status': '附註狀態',
'Note Type': '附註類型',
'Note added': '已新增附註',
'Note deleted': '刪除附註',
'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead': '請注意,此清單僅顯示現役志願者。若要查看系統中登記的所有人,請改從首頁畫面搜尋',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': '請注意,此清單僅顯示現役志願者。若要查看系統中登記的所有人,請改從此畫面搜尋',
'Note updated': '附註更新',
'Note': '附註',
'Notes': '附註',
'Notice to Airmen': '飛航公告',
'Number of Columns': '直欄數',
'Number of Patients': '病患人數',
'Number of People Affected': '受影響的人數',
'Number of People Deceased': '死亡人數',
'Number of People Injured': '受傷人數',
'Number of Rows': '橫列數',
'Number of Vehicles': '車輛數目',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': '預計未來24小時內此單位將可增加之該類型病床數。',
'Number of alternative places for studying': '替代學習場所的數目',
'Number of available/vacant beds of that type in this unit at the time of reporting.': '報告時此單位該類型可用/空置的病床數。',
'Number of deaths during the past 24 hours.': '過去24小時內的死亡人數。',
'Number of discharged patients during the past 24 hours.': '過去24小時內的出院病患人數。',
'Number of doctors actively working': '實際執勤的醫師人數',
'Number of doctors': '醫師人數',
'Number of houses damaged, but usable': '損壞但可使用的房屋數',
'Number of houses destroyed/uninhabitable': '損毀/無法居住的房屋數',
'Number of in-patients at the time of reporting.': '報告時的住院病患人數。',
'Number of latrines': '廁所數目',
'Number of midwives actively working': '實際執勤的助產士人數',
'Number of newly admitted patients during the past 24 hours.': '過去24小時內新收治的病患人數。',
'Number of non-medical staff': '非醫療人員人數',
'Number of nurses actively working': '實際執勤的護士人數',
'Number of nurses': '護士人數',
'Number of private schools': '私立學校數',
'Number of public schools': '公立學校數',
'Number of religious schools': '宗教學校數',
'Number of residential units not habitable': '無法居住的住宅單位數',
'Number of residential units': '住宅單位數',
'Number of schools damaged but usable': '損壞但可使用的學校數',
'Number of schools destroyed/uninhabitable': '損毀/無法居住的學校數',
'Number of schools open before disaster': '災前開課的學校數',
'Number of schools open now': '目前開課的學校數',
'Number of teachers affected by disaster': '受災難影響的教師人數',
'Number of teachers before disaster': '災前教師人數',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': '這家醫院空置/可用病床數。 自動更新從每日報告。',
'Number of vacant/available units to which victims can be transported immediately.': '受害人可立即運送空置/可用單位數。',
'Number or Label on the identification tag this person is wearing (if any).': '聯絡人配戴的識別證編號或符號 (如果有).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': '用以標記要搜尋的地方的號碼或代碼,如標誌代碼,網格坐標,場地參考號碼或類似(如果有)',
'Number': '號碼',
'Number/Percentage of affected population that is Female & Aged 0-5': '受影響人口中女性0到5歲的人數/百分比',
'Number/Percentage of affected population that is Female & Aged 13-17': '受影響人口中女性13到17歲的人數/百分比',
'Number/Percentage of affected population that is Female & Aged 18-25': '受影響人口中女性18到25歲的人數/百分比',
'Number/Percentage of affected population that is Female & Aged 26-60': '受影響人口中女性26到60歲的人數/百分比',
'Number/Percentage of affected population that is Female & Aged 6-12': '受影響人口中女性6到12歲的人數/百分比',
'Number/Percentage of affected population that is Female & Aged 61+': '受影響人口中女性61歲以上的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 0-5': '受影響人口中男性0到5歲的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 13-17': '受影響人口中男性13到17歲的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 18-25': '受影響人口中男性18到25歲的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 26-60': '受影響人口中男性26到60歲的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 6-12': '受影響人口中男性6到12歲的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 61+': '受影響人口中男性61歲以上的人數/百分比',
'Numbers Only': '只能填數字',
'Nurse': '護士',
'Nursing Information Manager': '護理資訊管理者',
'Nutrition problems': '營養問題',
'Nutrition': '營養',
'OK': '確定',
'OR Reason': '或原因',
'OR Status Reason': '或狀態原因',
'OR Status': '或狀態',
'Observer': '觀察程式 (observer)',
'Obsolete': '已作廢',
'Office Address': '辦公室地址',
'Office Details': '辦公室詳細資料',
'Office Phone': '辦公室電話',
'Office added': '辦公室新增',
'Office deleted': '辦公室刪除',
'Office updated': '辦公室更新',
'Office': '辦公室',
'Offices & Warehouses': '辦公室与倉庫',
'Offices': '辦公室',
'Offline Sync (from USB/File Backup)': '離线同步(從USB/檔案備份)',
'Offline Sync': '離線同步',
'Old': '舊',
'Older people as primary caregivers of children': '作為兒童主要照顧者的年長者',
'Older people in care homes': '安養院中的年長者',
'Older people participating in coping activities': '參與應變活動的年長者',
'Older people with chronical illnesses': '患有慢性疾病的年長者',
'Older person (>60 yrs)': '年長者(>60歲)',
'On by default? (only applicable to Overlays)': '依預設值嗎? (僅適用于覆蓋)',
'On by default?': '依預設值嗎?',
'On-site Hospitalization': '現场住院',
'One Time Cost': '單次成本',
'One time cost': '單次成本',
'One-time costs': '一次性成本',
'One-time': '一次性',
'Oops! Something went wrong...': '糟糕!發生錯誤了...',
'Oops! something went wrong on our side.': '糟糕!我們這邊發生了錯誤。',
'Opacity (1 for opaque, 0 for fully-transparent)': '不透明度(1為不透明,0為完全透明)',
'Open Map': '開啟地圖',
'Open area': '開啟區域',
'Open in New Tab': '在新標籤中開啟',
'Open recent': '開啟最近文件',
'Open': '開啟',
'OpenID Login': 'OpenID 登入',
'OpenID authenticated successfully.': 'OpenID順利鉴別。',
'Operating Rooms': '手術室',
'Operation': '作業',
'Optional link to an Incident which this Assessment was triggered by.': '可選:連結至觸發此評量的事件。',
'Optional': '選用',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': '選用。如果您想依某屬性的值設定圖徵樣式,請在此選擇要使用的屬性。',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '選用。在 GeoServer 中,這是工作區名稱空間 URI(不是名稱!)。在 WFS getCapabilities 中,這是 FeatureType 名稱中冒號(:)之前的部分。',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '選用。在 GeoServer 中,這是工作區名稱空間 URI。在 WFS getCapabilities 中,這是 FeatureType 名稱中冒號(:)之前的部分。',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': '選用。內容應為放入彈出視窗之影像檔 URL 的元素名稱。',
'Optional. The name of an element whose contents should be put into Popups.': '選用。內容應放入彈出視窗的元素名稱。',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': '選用。綱目的名稱。在 Geoserver 中,其格式為 http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name。',
'Options': '選項',
'Organization Details': '組織明細',
'Organization Registry': '組織登錄',
'Organization added': '新增組織',
'Organization deleted': '刪除組織',
'Organization updated': '更新組織',
'Organization': '組織',
'Organizations': '組織',
'Origin of the separated children': '失散兒童的來源地',
'Origin': '源點',
'Other (describe)': '其他(說明)',
'Other (specify)': '其他(請說明)',
'Other Evidence': '其他證据',
'Other Faucet/Piped Water': '其他水龍頭/自來水',
'Other Isolation': '其他隔離',
'Other Name': '其他名稱',
'Other activities of boys 13-17yrs before disaster': '災前13-17歲男孩的其他活動',
'Other activities of boys 13-17yrs': '13-17歲男孩的其他活動',
'Other activities of boys <12yrs before disaster': '災前12歲以下男孩的其他活動',
'Other activities of boys <12yrs': '12歲以下男孩的其他活動',
'Other activities of girls 13-17yrs before disaster': '災前13-17歲女孩的其他活動',
'Other activities of girls 13-17yrs': '13-17歲女孩的其他活動',
'Other activities of girls<12yrs before disaster': '災前12歲以下女孩的其他活動',
'Other activities of girls<12yrs': '12歲以下女孩的其他活動',
'Other alternative infant nutrition in use': '其他使用中的嬰兒營養品',
'Other alternative places for study': '其他研究區',
'Other assistance needed': '其他需要的恊助',
'Other assistance, Rank': '其他协助,等級',
'Other current health problems, adults': '其他目前健康問題,成人',
'Other current health problems, children': '其他目前健康問題,小孩',
'Other events': '其他事件',
'Other factors affecting school attendance': '影響學校出席率的其他因素',
'Other major expenses': '其他主要費用',
'Other non-food items': '其他非食品項目',
'Other recommendations': '其他建議',
'Other residential': '其他居住地',
'Other school assistance received': '已收到的其他學校協助',
'Other school assistance, details': '其他學校協助,詳細資料',
'Other school assistance, source': '其他學校協助,來源',
'Other settings can only be set by editing a file on the server': '其他設定只能透過編輯伺服器上的檔案來設定',
'Other side dishes in stock': '庫存中的其他配菜',
'Other types of water storage containers': '其他類型的儲水容器',
'Other ways to obtain food': '其他方式來取得食品',
'Other': '其他',
'Outbound Mail settings are configured in models/000_config.py.': '外寄郵件設定在 models/000_config.py 中配置。',
'Outbox': '寄件匣',
'Outgoing SMS Handler': 'SMS送出的處理常式',
'Outgoing SMS handler': 'SMS送出的處理常式',
'Overall Hazards': '整体危害',
'Overhead falling hazard': '高空墜物危害',
'Overland Flow Flood': '地表漫流洪水',
'Overlays': '重疊',
'Owned Resources': '擁有的資源',
'PF Number': 'PF號碼',
'PIN number': '密碼',
'PIN': '密碼',
'PL Women': '孕婦及哺乳期婦女',
'Pack': '包',
'Packs': '套件',
'Pan Map: keep the left mouse button pressed and drag the map': '平移地圖:按住滑鼠左鍵並拖曳地圖',
'Parameters': '參數',
'Parent Office': '母項辦公室',
'Parent needs to be of the correct level': '母項必须是正確的層次',
'Parent needs to be set for locations of level': '母項需要設定的位置的層次',
'Parent needs to be set': '母項需要設定',
'Parent': '母項',
'Parents/Caregivers missing children': '尋找失蹤兒童的父母/照顧者',
'Partial Database Synchronization': '部分資料庫同步化',
'Partial': '局部',
'Participant': '參與者',
'Partner Details': '夥伴詳細資料',
'Partner added': '新增伙伴',
'Partner deleted': '已刪除夥伴',
'Partner updated': '已更新夥伴',
'Partners': '夥伴',
'Pashto': '普什圖文',
'Pass': '通過 (pass)',
'Passport': '護照',
'Password for authentication at the peer. Note that only HTTP Basic authentication is supported.': '在對等端進行鑑別的密碼。請注意,僅支援 HTTP 基本鑑別。',
'Password': '密碼',
'Path': '路徑',
'Patients': '病患',
'Peer Details': '同層級詳細資料',
'Peer Registration Details': '同層級註冊詳細資料',
'Peer Registration Request': '同層級註冊申請',
'Peer Registration': '同層級註冊',
'Peer Type': '等式類型',
'Peer UID': '同層級UID',
'Peer added': '新增同層級',
'Peer deleted': '刪除同層級',
'Peer not allowed to push': '同層級不允許推送',
'Peer registration request added': '已加入之同層級註冊申請',
'Peer registration request deleted': '已刪除之同層級註冊申請',
'Peer registration request updated': '已更新之同層級註冊申請',
'Peer updated': '更新同層級',
'Peer': '對等',
'Peers': '同層級',
'Pending Requests': '擱置要求',
'Pending': '擱置中',
'People Needing Food': '需要食物的人員',
'People Needing Shelter': '需要收容所的人員',
'People Needing Water': '需要水的人員',
'People Trapped': '受困人員',
'People with chronical illnesses': '患有慢性疾病的人員',
'People': '個人',
'Performance Rating': '效能等級',
'Person 1': '人員 1',
'Person 1, Person 2 are the potentially duplicate records': '人員1、人員2 是可能重複的記錄',
'Person 2': '人員 2',
'Person Data': '人員資料',
'Person De-duplicator': '人員去重工具',
'Person Details': '人員明細',
'Person Finder': '人員搜尋器',
'Person Registry': '人員登錄',
'Person added to Group': '群組成員已新增',
'Person added to Team': '群組成員已新增',
'Person added': '已新增人員',
'Person deleted': '人員刪除',
'Person details updated': '人員詳細資料更新',
'Person found': '找到人員',
'Person interviewed': '人員受訪',
'Person missing': '遺漏人員',
'Person reporting': '人員報告',
'Person who has actually seen the person/group.': '實際見到該人員/群組的人。',
'Person who is reporting about the presence.': '通報此出現狀況的人。',
'Person who observed the presence (if different from reporter).': '觀察到該人員出現的人(如果與通報者不同)。',
'Person': '聯絡人',
'Person/Group': '人員/群組',
'Personal Data': '個人資料',
'Personal Effects Details': '個人物品詳細資料',
'Personal Effects': '個人物品',
'Personal Map': '個人地圖',
'Personal Profile': '個人設定檔',
'Personal impact of disaster': '災難的個人影響',
'Persons in institutions': '機構中的人員',
'Persons per Dwelling': '每戶人數',
'Persons with disability (mental)': '殘障人士(心理)',
'Persons with disability (physical)': '殘障人士(肢體)',
'Persons': '人員',
'Phone 1': '電話一',
'Phone 2': '電話二',
'Phone': '電話',
'Phone/Business': '電話/商業',
'Phone/Emergency': '電話/緊急',
'Phone/Exchange (Switchboard)': '電話/交換(switchboard)',
'Phone/Exchange': '電話/交換',
'Photo Details': '照片詳細資料',
'Photo Taken?': '已拍照?',
'Photo added': '新增照片',
'Photo deleted': '刪除照片',
'Photo updated': '更新照片',
'Photo': '照片',
'Photograph': '照片',
'Photos': '照片',
'Physical Description': '實體說明',
'Physical Safety': '實体安全',
'Physical': '實體',
'Picture upload and finger print upload facility': '圖像上传和指紋上載機能',
'Picture': '圖片',
'Place for solid waste disposal': '固體廢棄物處理場所',
'Place of Recovery': '尋獲地點',
'Place': '地點',
'Places for defecation': '如廁場所',
'Places the children have been sent to': '兒童被送往的地點',
'Planning': '規劃',
'Playing': '播放',
'Please correct all errors.': '請更正所有錯誤。',
'Please enter a First Name': '請輸入名字',
'Please enter a Google Key if you wish to use Google Layers': '如果你想使用谷歌層,請輸入谷歌的關鍵鑰',
'Please enter a Yahoo Key if you wish to use Yahoo Layers': '如果你想使用雅虎層,請輸入雅虎的關鍵鑰',
'Please enter a first name': '請輸入名字',
'Please enter a site OR a location': '請輸入一个站點或一个位置',
'Please enter a valid email address': '請輸入一個有效的電子郵件地址',
'Please enter the first few letters of the Person/Group for the autocomplete.': '請輸入人員/群組名稱的前幾個字母以使用自動完成。',
'Please enter the recipient': '請輸入收件者',
'Please fill this!': '請填寫這个!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened. If a ticket was issued then please provide the Ticket ID.': '請提供您所指頁面的 URL、您預期發生的情況,以及實際發生的情況。如果已產生問題單,請提供問題單編號。',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': '請提供您所指頁面的 URL、您預期發生的情況,以及實際發生的情況。',
'Please report here where you are:': '請在此回報您的所在位置:',
'Please select another level': '請選取另一个層次',
'Please select': '請選取',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': '請註冊您的行動電話,因為這可讓我們向您传送的文字訊息。 請包括完整區域碼。',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': '請詳細說明妥善處理該疾病時遇到的任何問題與障礙(適當時請附上數字)。您也可以加入改善狀況的建議。',
'Please use this field to record any additional information, including a history of the record if it is updated.': '請使用這个欄位來記錄任何其他資訊,包括一个歷史記錄的如果已更新。',
'Please use this field to record any additional information, including any Special Needs.': '請使用這个欄位來記錄任何其他資訊,包括任何特殊需求。',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': '請使用這个欄位來記錄任何其他資訊,例如Ushahidi實例ID。 包含歷史記錄的如果已更新。',
'Pledge Aid to match these Requests': '認捐援助以滿足這些要求',
'Pledge Aid': '認捐援助',
'Pledge Status': '認捐狀態',
'Pledge Support': '認捐支援',
'Pledge': '認捐',
'Pledged': '已認捐',
'Pledges': '認捐',
'Poisonous Gas': '有毒瓦斯',
'Police': '警察',
'Policy': '政策',
'Pollution and other environmental': '污染和其他環境',
'Polygon reference of the rating unit': '多邊形参照的等級單元',
'Polygon': '多邊形',
'Poor': '差',
'Population Statistic Details': '移入统計資料明細',
'Population Statistic added': '人口统計資料新增',
'Population Statistic deleted': '人口统計資料刪除',
'Population Statistic updated': '人口统計資料更新',
'Population Statistics': '人口统計',
'Population and number of households': '人口與戶數',
'Population': '人口',
'Popup Fields': '蹦現欄位',
'Popup Label': '蹦現標籤',
'Porridge': '稀飯',
'Port Closure': '埠關閉',
'Port': '埠',
'Position Catalog': '職位型錄',
'Position Details': '職位詳細資料',
'Position added': '已新增職位',
'Position deleted': '已刪除職位',
'Position type': '職位類型',
'Position updated': '已更新職位',
'Position': '職位',
'Positions': '職位',
'Postcode': '郵遞區號',
'Poultry restocking, Rank': '家禽補充,等級',
'Pounds': '英鎊',
'Power Failure': '電源故障',
'Powered by Sahana Eden': '採用Sahana Eden',
'Pre-cast connections': '預鑄接頭',
'Preferred Name': '暱稱',
'Pregnant women': '孕婦',
'Preliminary': '初步的',
'Presence Condition': '存在條件',
'Presence Log': '存在日誌',
'Presence': '存在',
'Previous View': '前一頁',
'Previous': '前一頁(P)',
'Primary Name': '主要名稱',
'Primary Occupancy': '主要佔用',
'Priority Level': '優先順序層次',
'Priority from 1 to 9. 1 is most preferred.': '優先順序從 1 到 9,1 為最優先。',
'Priority': '優先順序',
'Private': '專用',
'Problem Administration': '問題管理',
'Problem Details': '問題明細',
'Problem Group': '問題群組',
'Problem Title': '問題標題',
'Problem added': '新增問題',
'Problem connecting to twitter.com - please refresh': '連線 twitter.com 時發生問題-請重新整理',
'Problem deleted': '問題已刪除',
'Problem updated': '問題已更新',
'Problem': '問題',
'Problems': '問題',
'Procedure': '程序',
'Process Received Shipment': '處理已接收的出貨',
'Process Shipment to Send': '處理待送出的出貨',
'Procurements': '採購',
'Product Description': '產品說明',
'Product Name': '產品名稱',
'Profile updated': '更新設定檔',
'Profile': '設定檔',
'Project Activities': '專案活動',
'Project Details': '專案詳細資料',
'Project Management': '專案管理',
'Project Status': '專案狀態',
'Project Tracking': '專案追蹤',
'Project added': '新增專案',
'Project deleted': '已刪除專案',
'Project has no Lat/Lon': '專案沒有緯度/經度',
'Project updated': '已更新專案',
'Project': '專案 (project)',
'Projection Details': '投影詳細資料',
'Projection added': '已新增投影',
'Projection deleted': '已刪除投影',
'Projection updated': '已更新投影',
'Projection': '投影',
'Projections': '投影',
'Projects': '專案',
'Property reference in the council system': '議會制度中的物業參考',
'Protected resource': '受保護的資源',
'Protection': '保護',
'Provide Metadata for your media files': '為您的媒體檔案提供中繼資料',
'Provide a password': '提供密碼',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': '可選擇提供整棟建築物或損壞點的草圖。請標明損壞點。',
'Province': '省',
'Proxy-server': 'Proxy伺服器',
'Psychiatrics/Adult': '精神科/成人',
'Public Event': '公用事件',
'Public and private transportation': '公共和私有運輸',
'Public assembly': '公眾集會',
'Public': '公用',
'Pull tickets from external feed': '從外部來源拉取問題單',
'Punjabi': '旁遮普文',
'Purchase Date': '採購日期',
'Push tickets to external system': '將問題單推送至外部系統',
'Put a choice in the box': '在方塊中填入選項',
'Pyroclastic Flow': '火山碎屑流',
'Pyroclastic Surge': '火山碎屑湧浪',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': '執行中的 Python 沒有 Python Serial 模組-需要安裝才能啟用數據機',
'Python needs the ReportLab module installed for PDF export': 'Python 需要安裝 ReportLab 模組才能匯出 PDF',
'Quantity Committed': '確定數量',
'Quantity Fulfilled': '履行數量',
'Quantity in Transit': '在途數量',
'Quantity': '數量',
'Quarantine': '隔離',
'Queries': '查詢',
'Query Feature': '查詢功能',
'Query': '查詢 (query)',
'Queryable?': '可查詢?',
'RC frame with masonry infill': 'RC 構架含磚砌填充',
'RECORD A': '記錄 A',
'RECORD B': '記錄B',
'RESPONSE': '回應',
'RPC Service URL': 'RPC服務URL',
'Race': '種族',
'Radio Callsign': '電台呼號',
'Radiological Hazard': '輻射危害',
'Railway Accident': '鐵路事故',
'Railway Hijacking': '鐵路劫持',
'Rain Fall': '降雨',
'Rapid Assessment Details': '快速評估詳細資料',
'Rapid Assessment added': '已新增快速評估',
'Rapid Assessment deleted': '已刪除快速評估',
'Rapid Assessment updated': '已更新快速評估',
'Rapid Assessment': '快速評估',
'Rapid Assessments & Flexible Impact Assessments': '快速評估與彈性影響評估',
'Rapid Assessments': '快速評估',
'Rapid Close Lead': '快速關閉商機',
'Rapid Data Entry': '快速數據輸入',
'Rating Scale': '評分量表',
'Raw Database access': '原始資料庫存取',
'Read-Only': '唯讀',
'Read-only': '唯讀',
'Real World Arbitrary Units': '真實世界任意單位',
'Receive Items': '接收項目',
'Receive New Shipment': '接收新出貨',
'Receive Shipment': '接收貨物',
'Receive this shipment?': '接收此出貨?',
'Receive': '接收',
'Received By Person': '接收人',
'Received By': '接收人',
'Received Item Details': '接收項目詳細資料',
'Received Item deleted': '已刪除接收項目',
'Received Item updated': '已更新接收項目',
'Received Shipment Details': '收到出貨詳細資料',
'Received Shipment canceled and items removed from Inventory': '已取消接收的出貨,且項目已自庫存移除',
'Received Shipment canceled': '已取消接收的出貨',
'Received Shipment updated': '已更新接收的出貨',
'Received Shipments': '收到出貨',
'Received': '已接收',
'Receiving and Sending Items': '接收和發送項目',
'Recipient': '收件者',
'Recipients': '收件人',
'Recommendations for Repair and Reconstruction or Demolition': '維修和重建或拆除的建議',
'Record %(id)s created': '記錄 %(id)s 已建立',
'Record %(id)s updated': '記錄 %(id)s 已更新',
'Record Details': '記錄詳細資料',
'Record ID': '記錄 ID',
'Record Saved': '儲存記錄',
'Record added': '已新增記錄',
'Record any restriction on use or entry': '記錄任何使用或進入的限制',
'Record deleted': '刪除記錄',
'Record last updated': '記錄前次更新',
'Record not found!': '記錄未找到!',
'Record not found': '找不到記錄',
'Record updated': '更新記錄',
'Record': '記錄',
'Recording and Assigning Assets': '記錄及指派資產',
'Records': '記錄',
'Recovery Reports': '復原報告',
'Recovery Request added': '已新增復原要求',
'Recovery Request deleted': '已刪除復原要求',
'Recovery Request updated': '已更新復原要求',
'Recovery Request': '復原要求',
'Recovery Requests': '復原要求',
'Recovery report added': '已新增復原報告',
'Recovery report deleted': '已刪除復原報告',
'Recovery report updated': '已更新復原報告',
'Recovery': '復原',
'Recurring Cost': '循環成本',
'Recurring cost': '循環成本',
'Recurring costs': '循環成本',
'Recurring': '重複出現',
'Red Cross / Red Crescent': '紅十字會/紅新月會',
'Red': '紅色',
'Reference Document': '参考文件',
'Refers to default syncronization policy adopted if data entry recieved from other machine is already present in your machine.': '指若從其他機器接收的資料已存在於您的機器時,所採用的預設同步化原則。',
'Refresh Rate (seconds)': '更新頻率(秒)',
'Region Location': '區域位置',
'Region': '區域',
'Regional': '地區',
'Regions': '地區',
'Register Person into this Camp': '將人員登錄至此營地',
'Register Person into this Shelter': '將人員登錄至此庇護所',
'Register Person': '登錄人員',
'Register them as a volunteer': '將其登錄為志工',
'Register': '註冊',
'Registered People': '註冊的人員',
'Registered users can': '註冊使用者可以',
'Registering ad-hoc volunteers willing to contribute': '登錄願意協助的臨時志工',
'Registration Details': '註冊詳細資料',
'Registration added': '新增登錄',
'Registration entry deleted': '刪除登錄項目',
'Registration is pending approval': '申請等候核准中',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': '登錄仍在擱置核准從核准者 (%s) -請稍候直到收到確認。',
'Registration key': '註冊金鑰',
'Registration successful': '登錄成功',
'Registration updated': '更新登錄',
'Registration': '登錄',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '登錄系統追蹤所有在災區工作的救援組織。它不僅記錄這些組織活動的地點,也記錄其在各地區提供之專案範圍的資訊。',
'Rehabilitation/Long Term Care': '复健/長期照護',
'Reinforced masonry': '加強磚造',
'Rejected': '已拒絕',
'Reliable access to sanitation/hygiene items': '可靠取得衛生用品',
'Relief Item Catalog': '救援物資型錄',
'Relief Item Details': '救援物資詳細資料',
'Relief Item': '救援物資',
'Relief Items stored in Inventories in different locations': '儲存於不同地點庫存中的救援物資',
'Relief Items': '救援物資',
'Relief Team': '救難隊',
'Relief': '救援',
'Religion': '宗教',
'Religious Leader': '宗教領導者',
'Religious': '宗教',
'Relocate as instructed in the <instruction>': '依照 <instruction> 中的指示撤離至他處',
'Remove Activity from this event': '從這個事件中移除活動',
'Remove Asset from this event': '從這個事件中移除資產',
'Remove Asset from this scenario': '從這個情境中移除資產',
'Remove Document from this request': '從這個需求中移除文件',
'Remove Facility from this event': '從這個事件中移除設施',
'Remove Facility from this scenario': '從這個情境中移除設施',
'Remove Feature: Select the feature you wish to remove & press the delete key': '移除圖徵:選取您要移除的圖徵並按下 Delete 鍵',
'Remove Human Resource from this event': '從這個事件中移除人力資源',
'Remove Human Resource from this scenario': '從這個情境中移除人力資源',
'Remove Item from Inventory': '從庫存移除項目',
'Remove Map Profile from this event': '從這個事件中移除地圖配置',
'Remove Map Profile from this scenario': '從這個情境中移除地圖配置',
'Remove Person from Group': '刪除組員',
'Remove Person from Team': '刪除組員',
'Remove Skill from Request': '從需求中移除技能',
'Remove Skill': '移除技能',
'Remove Task from this event': '從事件中移除任務',
'Remove Task from this scenario': '從情境中移除此任務',
'Remove this asset from this event': '從這個事件中移除此資產',
'Remove this asset from this scenario': '從這個情境中移除此資產',
'Remove this facility from this event': '從這個活動中移除此設備',
'Remove this facility from this scenario': '從這個情境中移除此設備',
'Remove this human resource from this event': '從這個活動中移除此人力資源',
'Remove this human resource from this scenario': '從這個情境中移除此人力資源',
'Remove this task from this event': '從這個活動中移除此任務',
'Remove this task from this scenario': '從這個情境中移除此任務',
'Remove': '移除',
'Removed from Group': '組員已刪除',
'Removed from Team': '組員已刪除',
'Repair': '修復',
'Repaired': '修复',
'Repeat your password': '重复您的密碼',
'Replace All': '全部取代',
'Replace if Master': '若為主要來源則取代',
'Replace if Newer': '若較新,則取代',
'Replace with Remote': '以遠端取代',
'Replace': '取代',
'Replace/Master': '取代/主要',
'Replace/Newer': '取代/較新',
'Report Another Assessment...': '報告另一個評估...',
'Report Details': '報告詳細資料',
'Report Resource': '報告資源',
'Report Type': '報告類型',
'Report Types Include': '報告類型包括',
'Report a Problem with the Software': '回報軟體問題',
'Report added': '已新增報告',
'Report deleted': '已刪除報告',
'Report my location': '報告我的位置',
'Report that person missing': '通報該人員失蹤',
'Report the contributing factors for the current EMS status.': '回報影響目前 EMS 狀態的因素。',
'Report the contributing factors for the current OR status.': '回報影響目前手術室狀態的因素。',
'Report the person as found': '通報該人員已尋獲',
'Report them as found': '通報其已尋獲',
'Report them missing': '通報其失蹤',
'Report updated': '報告已更新',
'Report': '報告',
'Reported By': '報告者',
'Reporter Name': '報告者姓名',
'Reporter': '報告者',
'Reporter:': '報告者:',
'Reporting on the projects in the region': '回報該區域中的專案',
'Reports': '報告',
'Request Added': '新增要求',
'Request Aid': '輔助請求',
'Request Canceled': '已取消申請',
'Request Detail': '要求詳細資料',
'Request Details': '要求的詳細資料',
'Request From': '要求來源',
'Request Item Details': '要求項目詳細資料',
'Request Item added': '已新增要求項目',
'Request Item deleted': '已刪除要求項目',
'Request Item from Available Inventory': '從可用庫存要求項目',
'Request Item updated': '已更新要求項目',
'Request Item': '申請項目',
'Request Items': '申請項目',
'Request Status': '要求狀態',
'Request Type': '要求類型',
'Request Updated': '要求已更新',
'Request added': '新增要求',
'Request deleted': '已刪除要求',
'Request for Role Upgrade': '請求的角色升級',
'Request updated': '要求已更新',
'Request': '要求',
'Request, Response & Session': '要求,回應及階段作業',
'Requested By Facility': '提出要求的設施',
'Requested By Site': '提出要求的站點',
'Requested By Warehouse': '提出要求的倉儲',
'Requested By': '申請者',
'Requested From': '要求從',
'Requested Items': '所要求的項目',
'Requested Skill Details': '所需技能細節',
'Requested Skill updated': '所需技能更新',
'Requested Skill': '所需技能',
'Requested Skills': '所需技能',
'Requested by': '要求者',
'Requested on': '要求日期',
'Requested': '已要求',
'Requester': '要求者',
'Requestor': '要求者',
'Requests Management': '要求管理',
'Requests for Item': '要求的項目',
'Requests': '需求',
'Required Skill': '所需技能',
'Required by other servers.': '其他伺服器所需。',
'Requires Login!': '需要登入!',
'Requires login': '需要登入',
'Rescue and recovery': '應急與恢復系統 (Rescue and Recovery)',
'Reset Password': '重設密碼',
'Reset form': '重設表單',
'Reset': '重設',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': '調整功能:選擇您希望調整的功能,然後拖動相關的點到你想要的大小',
'Resolve Conflict': '解決衝突',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': '解決鏈接啟動一個新的畫面,有助於解決這些重複記錄和更新數據庫',
'Resolve': '解決',
'Resource Details': '資源詳細資料',
'Resource added': '新增資源',
'Resource deleted': '資源已刪除',
'Resource updated': '資源已更新',
'Resource': '資源',
'Resources': '資源',
'Respiratory Infections': '呼吸感染',
'Response Details': '回應明細',
'Response added': '新增回應',
'Response deleted': '刪除回應',
'Response updated': '回應已更新',
'Response': '回應',
'Responses': '回應',
'Restricted Access': '受限存取權',
'Restricted Use': '使用限制',
'Restrictions': '限制',
'Results': '結果',
'Resume Sync': '恢復同步',
'Retail Crime': '零售犯罪',
'Retrieve Password': '擷取密碼',
'Return to Request': '回到要求',
'Return': '返回',
'Returned From': '返回自',
'Returned': '已返回',
'Review Incoming Shipment to Receive': '檢閱送入出貨以接收',
'Rice': '稻米',
'Right now, your system is set default synchronization scheme. You are currently able to synchronize your server with other servers.': '目前您的系統採用預設同步化配置。您目前可以將您的伺服器與其他伺服器同步。',
'Right-hand headline': '右手標題',
'Right-to-Left': '由右至左',
'Riot': '暴動',
'River Details': '河流詳細資料',
'River added': '已新增河流',
'River deleted': '已刪除河流',
'River updated': '已更新河流',
'River': '河流',
'Rivers': '河流',
'Road Accident': '道路事故',
'Road Closed': '道路關閉',
'Road Conditions': '道路條件',
'Road Delay': '道路延遲',
'Road Hijacking': '道路劫持',
'Road Usage Condition': '道路使用條件',
'Role Details': '角色詳細資料',
'Role Required': '需要角色',
'Role Updated': '更新角色',
'Role added': '已新增角色',
'Role deleted': '已刪除角色',
'Role updated': '更新角色',
'Role': '角色',
'Role-based': '基於角色',
'Roles Permitted': '允許的角色',
'Roles': '角色',
'Roof tile': '屋瓦',
'Roofs, floors (vertical load)': '屋頂、樓板(垂直荷載)',
'Room Details': '房間詳細資料',
'Room added': '已新增房間',
'Room deleted': '已刪除房間',
'Room updated': '已更新房間',
'Rooms': '房間',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': '旋轉圖徵:選取您要旋轉的圖徵,然後拖曳相關的點旋轉至所需位置',
'Row Choices (One Per Line)': '列選項(每行一個)',
'Rows in table': '表格中的橫列',
'Rows selected': '已選取的列數',
'Run Functional Tests': '執行功能測試',
'Run Interval': '執行間隔',
'Running Cost': '執行成本',
'Russian': '俄文',
'SITUATION': '狀況',
'Safe environment for vulnerable groups': '弱勢族群的安全環境',
'Safety Assessment Form': '安全評量表單',
'Safety of children and women affected by disaster': '受災兒童與婦女的安全',
'Safety of children and women affected by disaster?': '受災兒童與婦女的安全?',
'Sahana Administrator': 'Sahana管理者',
'Sahana Blue': 'Sahana藍色',
'Sahana Community Chat': 'Sahana 社群聊天室',
'Sahana Eden <= Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=其他同步化(Sahana Agasti, Ushahidi,等等。 )',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=>其他(Sahana Agasti, Ushahidi,等等。 )',
'Sahana Eden <=> Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=>其他同步化(Sahana Agasti, Ushahidi,等等。 )',
'Sahana Eden <=> Other': 'Sahana Eden <=>其他',
'Sahana Eden <=> Sahana Eden sync': 'Sahana Eden <=> Sahana Eden同步',
'Sahana Eden Disaster Management Platform': 'Sahana Eden災難管理平台',
'Sahana Eden Humanitarian Management Platform': 'Sahana Eden Humanitarian管理平台',
'Sahana Eden Website': 'Sahana Eden 網站',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management. The following modules are available': 'Sahana Eden 是一套救災管理網站系統,可協助救援單位進行災難管理的分工合作。 下列模組可用',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.': 'Sahana Eden 是一套救災管理網站系統,可協助救援單位進行災難管理的分工合作。',
'Sahana FOSS Disaster Management System': 'Sahana FOSS災難管理系统',
'Sahana Green': 'Sahana綠色',
'Sahana Login Approval Pending': 'Sahana登入擱置核准',
'Sahana Steel': 'Sahana鋼',
'Sahana access granted': 'Sahana授予的存取權',
'Sahana has to hook to a network port other than port being used by website (normally port 80). If your firewall blocks this port you have change it to any other free port. For information on eligible ports, see': 'Sahana 必須連接至網站所用連接埠(通常為 80)以外的網路連接埠。如果您的防火牆封鎖此連接埠,可將其改為任何其他可用的連接埠。合格連接埠的相關資訊,請參閱',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana:已提出新的要求。請登入查看您是否能滿足該要求。',
'Salted Fish': '鹹魚',
'Salvage material usable from destroyed houses': '可自毀損房屋回收使用的物料',
'Salvage material usable from destroyed schools': '可自毀損學校回收使用的物料',
'Sanitation problems': '衛生設施問題',
'Satellite Layer': '衛星層',
'Satellite Office': '衛星辦公室',
'Satellite': '衛星',
'Saturday': '星期六',
'Save any Changes in the one you wish to keep': '在您要保留的那筆記錄中儲存所有變更',
'Save': '儲存',
'Save: Default Lat, Lon & Zoom for the Viewport': '儲存:檢視區的預設緯度、經度與縮放層級',
'Saved.': '已儲存。',
'Saving...': '正在儲存...',
'Scale of Results': '結果量表',
'Scanned File': '掃描檔案',
'Scenario Details': '情境詳細資料',
'Scenario added': '已新增情境',
'Scenario deleted': '已刪除情境',
'Scenario updated': '已更新情境',
'Scenario': '情境',
'Scenarios': '情境',
'Schedule': '排程',
'Schema': '綱目',
'School Closure': '學校關閉',
'School Code': '學校代碼',
'School District Details': '學區詳細資料',
'School District added': '已新增學區',
'School District deleted': '已刪除學區',
'School District updated': '已更新學區',
'School District': '學區',
'School Districts': '學區',
'School Lockdown': '學校鎖定',
'School Report Details': '學校報告詳細資料',
'School Report added': '學校報告新增',
'School Report deleted': '學校報告刪除',
'School Report updated': '學校報告更新',
'School Reports will be moved to Shelter Registry as this is what they are. Rapid Assessments will be added here.': '學校報告將移至庇護所登錄,因為其本質即屬庇護所資料。快速評估將新增於此。',
'School Reports': '學校報告',
'School Teacher': '學校老師',
'School activities': '學校活動',
'School assistance received/expected': '已收到/預期的學校協助',
'School assistance': '學校協助',
'School attendance': '學校出席率',
'School destroyed': '學校損毀',
'School heavily damaged': '學校大量損壞',
'School tents received': '已收到學校帳篷',
'School tents, source': '學校帳篷,來源',
'School used for other purpose': '學校被用於其他用途',
'School': '學校',
'School/studying': '上學/就學中',
'Schools': '學校',
'Search & List Bin Types': '搜尋與列出貯存箱類型',
'Search & List Bins': '搜尋與列出貯存箱',
'Search & List Catalog': '搜尋與列出型錄',
'Search & List Category': '搜尋與列出種類',
'Search & List Items': '搜尋與列出項目',
'Search & List Locations': '搜尋與列出位置',
'Search & List Site': '搜尋與列出站點',
'Search & List Sub-Category': '搜尋與列出子類別',
'Search & List Unit': '搜尋與列出單位',
'Search Activities': '搜尋活動',
'Search Activity Report': '搜尋活動報告',
'Search Addresses': '搜尋位址',
'Search Aid Requests': '搜尋輔助要求',
'Search Alternative Items': '搜尋替代項目',
'Search Assessment Summaries': '搜尋評量摘要',
'Search Assessments': '搜尋評量',
'Search Asset Assignments': '搜尋資產分派',
'Search Asset Log': '搜尋資產日誌',
'Search Assets': '搜尋資產',
'Search Baseline Type': '搜尋基準线類型',
'Search Baselines': '搜尋基準线',
'Search Brands': '搜尋品牌',
'Search Budgets': '搜尋預算',
'Search Bundles': '搜尋軟体組',
'Search Camp Services': '搜尋營地服務',
'Search Camp Types': '搜尋營地類型',
'Search Camps': '搜尋營地',
'Search Catalog Items': '搜尋型錄項目',
'Search Catalogs': '搜尋型錄',
'Search Category<>Sub-Category<>Catalog Relation': '搜尋Category<>Sub-Category<>Catalog關系',
'Search Certificates': '搜尋凭證',
'Search Certifications': '認證搜尋',
'Search Checklists': '核對搜尋',
'Search Cluster Subsectors': '搜尋叢集Subsectors',
'Search Clusters': '搜尋叢集',
'Search Commitment Items': '搜尋項目承諾',
'Search Commitments': '搜尋Commitments',
'Search Competencies': '搜尋能力',
'Search Competency Ratings': '搜尋能力等級',
'Search Configs': '搜尋配置',
'Search Contact Information': '搜尋聯絡資訊',
'Search Contacts': '搜尋聯絡人',
'Search Course Certicates': '搜尋課程憑證',
'Search Courses': '搜尋課程',
'Search Credentials': '認證搜尋',
'Search Distribution Items': '搜尋項目分配',
'Search Distributions': '搜尋配送',
'Search Documents': '搜尋文件',
'Search Donors': '搜尋捐贈者',
'Search Entries': '搜尋項目',
'Search Events': '搜尋事件',
'Search Facilities': '搜尋機能',
'Search Feature Layers': '搜尋功能層',
'Search Flood Reports': '搜尋水災報告',
'Search Geonames': '搜尋GeoNames',
'Search Groups': '搜尋群組',
'Search Homes': '搜尋家庭',
'Search Hospitals': '搜尋醫院',
'Search Human Resources': '搜尋人力資源',
'Search Identity': '搜尋身分',
'Search Images': '搜尋影像',
'Search Impact Type': '搜尋影响類型',
'Search Impacts': '搜尋影响',
'Search Import Files': '搜尋匯入檔案',
'Search Incident Reports': '搜尋事件報告',
'Search Incidents': '搜尋事件',
'Search Inventory Items': '搜尋庫存項目',
'Search Inventory Stores': '搜尋庫存倉儲',
'Search Inventory items': '搜尋庫存項目',
'Search Item Catalog Category(s)': '搜尋項目型錄分類',
'Search Item Catalog(s)': '搜尋項目目錄',
'Search Item Categories': '搜尋項目類別',
'Search Item Packs': '搜尋項目套件',
'Search Item Sub-Category(s)': '搜尋項目子類別(S)',
'Search Items': '搜尋項目',
'Search Job Roles': '搜尋工作角色',
'Search Keys': '搜尋關鍵字',
'Search Kits': '搜尋套件',
'Search Landmarks': '搜尋地標',
'Search Layers': '搜尋層',
'Search Level 1 Assessments': '搜尋第一級評估',
'Search Level 2 Assessments': '搜尋第二級評估',
'Search Level': '搜尋層級',
'Search Locations': '搜尋位置',
'Search Log Entry': '搜尋日誌項目',
'Search Map Profiles': '搜尋對映配置',
'Search Markers': '搜尋標記',
'Search Members': '搜尋成員',
'Search Membership': '搜尋成員資格',
'Search Memberships': '搜尋成員資格',
'Search Metadata': '搜尋meta資料',
'Search Missions': '搜尋任務',
'Search Need Type': '搜尋需要類型',
'Search Needs': '搜尋需求',
'Search Notes': '搜尋備註',
'Search Offices': '搜尋辦公室',
'Search Organizations': '搜尋組織',
'Search Partners': '搜尋伙伴',
'Search Patients': '查詢病人',
'Search Peer': '搜尋同層級',
'Search Peers': '搜尋對等',
'Search Personal Effects': '搜尋個人物品',
'Search Persons': '搜尋人員',
'Search Photos': '搜尋照片',
'Search Population Statistics': '搜尋人口统計資料',
'Search Positions': '搜尋位置',
'Search Problems': '搜尋問題',
'Search Projections': '搜尋估算',
'Search Projects': '搜尋專案',
'Search Rapid Assessments': '搜尋快速評估',
'Search Received Items': '搜尋接收項目',
'Search Received Shipments': '搜尋收到出貨',
'Search Records': '搜尋記錄',
'Search Recovery Reports': '搜尋復原報告',
'Search Registations': '搜尋登錄',
'Search Registration Request': '搜尋登錄要求',
'Search Report': '搜尋報告',
'Search Reports': '搜尋報告',
'Search Request Items': '搜尋要求項目',
'Search Request': '搜尋要求',
'Search Requested Items': '搜尋所要求的項目',
'Search Requested Skills': '查詢要求技能',
'Search Requests': '搜尋需求',
'Search Resources': '搜尋資源',
'Search Responses': '搜尋回應',
'Search Rivers': '搜尋河流',
'Search Roles': '搜尋角色',
'Search Rooms': '搜尋檔案室',
'Search Scenarios': '搜尋實務',
'Search School Districts': '搜尋學校行政區',
'Search School Reports': '搜尋學校報告',
'Search Sections': '搜尋區段',
'Search Sectors': '搜尋部門',
'Search Sent Items': '搜尋已送出項目',
'Search Sent Shipments': '搜尋已送出出貨',
'Search Service Profiles': '搜尋服務設定檔',
'Search Settings': '搜尋設定',
'Search Shelter Services': '搜尋庇護所服務',
'Search Shelter Types': '搜尋庇護所類型',
'Search Shelters': '搜尋庇護所',
'Search Shipment Transit Logs': '搜尋出貨運送日誌',
'Search Shipment/Way Bills': '搜尋出貨/提單',
'Search Shipment<>Item Relation': '搜尋貨運<>項目關係',
'Search Site(s)': '搜尋站點',
'Search Skill Equivalences': '搜尋技能同等',
'Search Skill Provisions': '搜尋技能條款',
'Search Skill Type': '搜尋技能類型',
'Search Skill Types': '搜尋技能類型',
'Search Skill': '搜尋技能',
'Search Skills': '搜尋技能',
'Search Solutions': '搜尋解決方案',
'Search Sources': '搜尋來源',
'Search Staff Types': '搜尋人員類型',
'Search Staff or Volunteer': '搜尋人員或主動参与者',
'Search Staff': '搜尋人員',
'Search Status': '搜尋狀態',
'Search Storage Bin Type(s)': '搜尋貯存箱類型',
'Search Storage Bin(s)': '搜尋貯存箱',
'Search Storage Location(s)': '搜尋儲存體位置',
'Search Subscriptions': '搜尋訂閱',
'Search Subsectors': '搜尋Subsectors',
'Search Support Requests': '搜尋支援要求',
'Search Tasks': '搜尋作業',
'Search Teams': '搜尋團隊',
'Search Themes': '搜尋主題',
'Search Tickets': '搜尋問題單',
'Search Tracks': '搜尋追蹤',
'Search Trainings': '搜尋訓練',
'Search Twitter Tags': '搜尋Twitter標籤',
'Search Units': '搜尋單位',
'Search Updates': '搜尋更新',
'Search Users': '搜尋使用者',
'Search Vehicle Details': '查詢交通工具細節',
'Search Vehicles': '查詢交通工具',
'Search Volunteer Availability': '搜尋志工可服勤時間',
'Search Volunteer Registrations': '搜尋志工登錄',
'Search Volunteers': '搜尋志愿者',
'Search Warehouses': '搜尋倉庫',
'Search and Edit Group': '搜尋及編輯群組',
'Search and Edit Individual': '搜尋及編輯個人',
'Search by ID Tag': '依 ID 標籤搜尋',
'Search for Items': '搜尋項目',
'Search for Staff or Volunteers': '搜尋人員或志愿者',
'Search for a Hospital': '搜尋醫院',
'Search for a Location by name, including local names.': '依名稱搜尋位置,包括當地名稱。',
'Search for a Location': '搜尋位置',
'Search for a Person': '人員查詢',
'Search for a Project': '搜尋專案',
'Search for a Request': '搜尋要求',
'Search for a shipment by looking for text in any field.': '以任何欄位中的文字搜尋出貨。',
'Search for a shipment received between these dates': '搜尋在這些日期之間接收的出貨',
'Search for a vehicle by text.': '以文字查詢交通工具.',
'Search for an Organization by name or acronym': '依名稱或縮寫搜尋組織',
'Search for an Organization by name or acronym.': '依名稱或縮寫搜尋組織。',
'Search for an asset by text.': '以文字搜尋資產。',
'Search for an item by category.': '依類別搜尋項目。',
'Search for an item by Year of Manufacture.': '以製造年份查詢項目.',
'Search for an item by text.': '以文字搜尋項目。',
'Search for asset by country.': '依國家搜尋資產。',
'Search for office by country.': '依國家搜尋辦公室。',
'Search for office by organization.': '依組織搜尋辦公室。',
'Search for office by text.': '以文字搜尋辦公室。',
'Search for warehouse by country.': '依國家搜尋倉儲。',
'Search for warehouse by organization.': '依組織搜尋倉儲。',
'Search for warehouse by text.': '以文字搜尋倉儲。',
'Search here for a person record in order to:': '在此搜尋個人記錄,以便:',
'Search messages': '搜尋訊息',
'Search': '搜尋',
'Searching for different groups and individuals': '搜尋不同的群組及個人',
'Secondary Server (Optional)': '次要伺服器(選用)',
'Seconds must be a number between 0 and 60': '秒必須是 0 到 60 之間的數字',
'Seconds must be between 0 and 60': '秒必須介於 0 到 60 之間',
'Section Details': '區段詳細資料',
'Section added': '新增區段',
'Section deleted': '刪除區段',
'Section updated': '更新區段',
'Sections': '區段',
'Sector Details': '部門詳細資料',
'Sector added': '已新增部門',
'Sector deleted': '已刪除部門',
'Sector updated': '已更新部門',
'Sector': '部門',
'Sector(s)': '部門',
'Sector(s):': '部門:',
'Sectors': '部門',
'Security Policy': '安全原則 (security policy)',
'Security Status': '安全狀態',
'Security problems': '安全問題',
'Security': '安全',
'See All Entries': '查看所有項目',
'See all': '請參閱全部',
'See unassigned recovery requests': '查看未指派的復原要求',
'Seen': '看到',
'Select 2 potential locations from the dropdowns.': '從下拉清單中選取 2 個可能的位置。',
'Select Items from the Request': '從要求中選取項目',
'Select Items from this Inventory': '從這個庫存中選取項目',
'Select Language': '選取語言',
'Select Photos': '選取照片',
'Select a location': '選取位置',
'Select a question from the list': '從清單中選取問題',
'Select a range for the number of total beds': '選取總床位數的範圍',
'Select all that apply': '選取所有適用的',
'Select an Organization to see a list of offices': '選取組織以查看其辦公室清單',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': '選取與各項需求相關的評估與活動疊加圖層,以找出缺口。',
'Select the person assigned to this role for this project.': '選取在此專案中擔任此角色的人員。',
'Select the person associated with this scenario.': '選取與此情境相關的人員。',
'Select to show this configuration in the Regions menu.': '選取以在區域功能表中顯示此配置。',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': '選取傳送簡訊時要使用數據機、Tropo 或其他閘道',
'Selects whether to use the gateway or the Modem for sending out SMS': '選取傳送簡訊時要使用閘道或數據機',
'Self Registration': '自行註冊',
'Self-care': '自我管理',
'Self-registration': '自我登記',
'Send Alerts using Email &/or SMS': '透過電子郵件和/或簡訊發送通知',
'Send Commitment as Shipment': '將承諾作為出貨送出',
'Send Items': '传送項目',
'Send Mail': '傳送郵件',
'Send Message': '傳送訊息',
'Send New Shipment': '传送新出貨',
'Send Notification': '傳送通知',
'Send Shipment': '传送出貨',
'Send a message to this person': '傳送訊息給此人',
'Send a message to this team': '傳送訊息給此團隊',
'Send from %s': '從 %s 傳送',
'Send message': '傳送訊息',
'Send new message': '传送新訊息',
'Send': '傳送',
'Sends & Receives Alerts via Email & SMS': '透過電子郵件和簡訊收發通知',
'Senior (50+)': '年長者 (50+)',
'Sensitivity': '靈敏度',
'Sent By Person': '傳送者',
'Sent By': '寄件者',
'Sent Item Details': '已送出項目詳細資料',
'Sent Item deleted': '已刪除送出項目',
'Sent Item updated': '已更新送出項目',
'Sent Shipment Details': '已送出出貨詳細資料',
'Sent Shipment canceled and items returned to Inventory': '已取消送出的出貨,項目已退回庫存',
'Sent Shipment canceled': '已取消送出的出貨',
'Sent Shipment updated': '已更新送出的出貨',
'Sent Shipments': '已送出的出貨',
'Sent': '已送出',
'Separate latrines for women and men': '男女分開的廁所',
'Separated children, caregiving arrangements': '失散兒童的照護安排',
'Serial Number': '序號',
'Series': '系列',
'Server': '伺服器 (server)',
'Service Catalogue': '服務型錄',
'Service or Facility': '服務或設施',
'Service profile added': '新增服務設定檔',
'Service profile deleted': '服務設定檔刪除',
'Service profile updated': '服務設定檔更新',
'Service': '服務程式',
'Services Available': '服務可用',
'Services': '服務',
'Set Base Site': '設定基本網站',
'Set By': '設定者',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': '設為 True 可允許非 MapAdmin 的使用者編輯位置階層的此層級。',
'Setting Details': '設定明細',
'Setting added': '新增設定',
'Setting deleted': '設定已經刪除',
'Setting updated': '更新設定',
'Settings updated': '已更新設定',
'Settings were reset because authenticating with Twitter failed': '因 Twitter 驗證失敗,設定已重設',
'Settings which can be configured through the web interface are available here.': '可透過網頁介面配置的設定在此提供。',
'Settings': '設定',
'Severe': '嚴重',
'Severity': '嚴重性',
'Share a common Marker (unless over-ridden at the Feature level)': '共用一個共同的標記(除非在圖徵層級被覆寫)',
'Shelter & Essential NFIs': '庇護所與必要 NFI',
'Shelter Details': '庇護所詳細資料',
'Shelter Name': '庇護所名稱',
'Shelter Registry': '庇護所登錄',
'Shelter Service Details': '庇護所服務詳細資料',
'Shelter Service added': '已新增庇護所服務',
'Shelter Service deleted': '已刪除庇護所服務',
'Shelter Service updated': '已更新庇護所服務',
'Shelter Service': '庇護所服務',
'Shelter Services': '庇護所服務',
'Shelter Type Details': '庇護所類型詳細資料',
'Shelter Type added': '已新增庇護所類型',
'Shelter Type deleted': '已刪除庇護所類型',
'Shelter Type updated': '已更新庇護所類型',
'Shelter Type': '庇護所類型',
'Shelter Types and Services': '庇護所類型和服務',
'Shelter Types': '庇護所類型',
'Shelter added': '已新增庇護所',
'Shelter deleted': '已刪除庇護所',
'Shelter updated': '已更新庇護所',
'Shelter': '庇護所',
'Shelter/NFI Assistance': '庇護所/NFI 協助',
'Shelter/NFI assistance received/expected': '已收到/預期的庇護所/NFI 協助',
'Shelters': '庇護所',
'Shipment Created': '已建立出貨',
'Shipment Details': '出貨詳細資料',
'Shipment Items received by Inventory': '庫存已接收的出貨項目',
'Shipment Items sent from Inventory': '自庫存送出的出貨項目',
'Shipment Items': '出貨項目',
'Shipment Transit Log Details': '出貨運送日誌詳細資料',
'Shipment Transit Log added': '已新增出貨運送日誌',
'Shipment Transit Log deleted': '已刪除出貨運送日誌',
'Shipment Transit Log updated': '已更新出貨運送日誌',
'Shipment Transit Logs': '出貨運送日誌',
'Shipment to Send': '待送出的出貨',
'Shipment/Way Bill added': '已新增出貨/提單',
'Shipment/Way Bills Details': '出貨/提單詳細資料',
'Shipment/Way Bills deleted': '已刪除出貨/提單',
'Shipment/Way Bills updated': '已更新出貨/提單',
'Shipment/Way Bills': '出貨/提單',
'Shipment<>Item Relation added': '貨運<>新增項目關係',
'Shipment<>Item Relation deleted': '貨運<>刪除項目關係',
'Shipment<>Item Relation updated': '貨運<>更新項目關係',
'Shipment<>Item Relations Details': '貨運<>項目關係詳細資料',
'Shipment<>Item Relations': '貨運<>項目關係',
'Shipments To': '出貨至',
'Shipments': '貨物',
'Short Assessment': '簡短評估',
'Short Description': '簡要說明',
'Show Checklist': '顯示清單',
'Show Details': '顯示詳細資料',
'Show Map': '顯示地圖',
'Show Region in Menu?': '在功能表中顯示區域?',
'Show on Map': '顯示在地圖上',
'Show on map': '顯示在地圖上',
'Sign in': '登入',
'Sign-in with OpenID:': '以 OpenID 登入:',
'Sign-up as a volunteer': '註冊成為志工',
'Sign-up for Account': '註冊帳戶',
'Sign-up succesful - you should hear from us soon!': '註冊成功-我們很快會與你聯絡!',
'Sindhi': '信德文',
'Single PDF File': '單一 PDF 檔案',
'Site Address': '站台位址',
'Site Administration': '網站管理',
'Site Description': '場所說明',
'Site Details': '網站詳細資料',
'Site ID': '網站 ID',
'Site Location Description': '網站位置說明',
'Site Location Name': '站台位置名稱',
'Site Manager': '網站管理',
'Site Name': '網站名稱',
'Site added': '新增網站',
'Site deleted': '刪除站點',
'Site updated': '更新站點',
'Site': '網站',
'Site/Warehouse': '網站/倉儲',
'Sites': '場所',
'Situation Awareness & Geospatial Analysis': '情境覺知與地理空間分析',
'Situation Report': '情勢報告',
'Situation': '狀況',
'Sketch': '概略圖',
'Skill Catalog': '技能型錄',
'Skill Details': '技能詳細資料',
'Skill Equivalence Details': '技能等值詳細資料',
'Skill Equivalence added': '技能新增等值',
'Skill Equivalence deleted': '技能刪除等值',
'Skill Equivalence updated': '技能等值更新',
'Skill Equivalence': '等值技能',
'Skill Equivalences': '同等技能',
'Skill Provision Catalog': '技能供應型錄',
'Skill Provision Details': '技能供應詳細資料',
'Skill Provision added': '技能供應新增',
'Skill Provision deleted': '技能供應刪除',
'Skill Provision updated': '技能供應更新',
'Skill Provision': '供應技能',
'Skill Provisions': '技術條款',
'Skill Status': '技能狀態',
'Skill TYpe': '技術類型',
'Skill Type Catalog': '型錄技術類型',
'Skill Type Details': '技術類型詳細資料',
'Skill Type added': '添加技術類型',
'Skill Type deleted': '刪除技術類型',
'Skill Type updated': '更新技術類型',
'Skill Type': '技術類型',
'Skill Types': '技能類型',
'Skill added to Request': '技能已新增至需求',
'Skill added': '添加技能',
'Skill deleted': '刪除技能',
'Skill removed from Request': '技能已從需求中移除',
'Skill removed': '技能已移除',
'Skill updated': '技術更新',
'Skill': '技能',
'Skills Catalog': '技能型錄',
'Skills Management': '技能管理',
'Skills': '技術',
'Slope failure, debris': '邊坡崩塌、土石',
'Small Trade': '小型交易',
'Smoke': '煙霧',
'Snapshot Report': 'Snapshot 報告',
'Snapshot': '快照',
'Snow Fall': '降雪',
'Social': '社會',
'Soil bulging, liquefaction': '土壤隆起、液化',
'Solid waste': '固體廢棄物',
'Solution Details': '解決方案明細',
'Solution Item': '解决方案項目',
'Solution added': '新增解决方案',
'Solution deleted': '刪除解决方案',
'Solution updated': '更新解决方案',
'Solution': '解决方案',
'Solutions': '解決方案',
'Some': '部分',
'Sorry - the server has a problem, please try again later.': '抱歉-伺服器發生問題,請稍後再試。',
'Sorry that location appears to be outside the area of the Parent.': '抱歉,該位置似乎在上層位置的區域之外。',
'Sorry that location appears to be outside the area supported by this deployment.': '抱歉,該位置似乎在此部署支援的區域之外。',
'Sorry, I could not understand your request': '抱歉,我不瞭解您的請求',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': '抱歉,只有具 MapAdmin 角色的使用者才能建立位置群組。',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': '抱歉,只有具 MapAdmin 角色的使用者才能編輯這些位置',
'Sorry, something went wrong.': '很抱歉,發生錯誤。',
'Sorry, that page is forbidden for some reason.': '抱歉,由於某些原因,該頁面禁止存取。',
'Sorry, that service is temporary unavailable.': '抱歉,該服務暫時無法使用。',
'Sorry, there are no addresses to display': '抱歉,沒有可顯示的位址',
'Source Details': '來源詳細資料',
'Source ID': '來源 ID',
'Source Time': '來源時間',
'Source Type': '來源類型',
'Source added': '新增來源',
'Source deleted': '刪除來源',
'Source of Information': '資訊來源',
'Source updated': '更新來源',
'Source': '來源',
'Sources of income': '收入來源',
'Sources': '來源',
'Space Debris': '空間碎片',
'Spanish': '西班牙文',
'Special Ice': '特殊冰況',
'Special Marine': '特殊海事',
'Special needs': '特殊需求',
'Specialized Hospital': '專科醫院',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': '在該位置內看到此人員/群組的特定區域(例如建築物/房間)。',
'Specific locations need to have a parent of level': '特定位置必須有此層級的上層位置',
'Specify a descriptive title for the image.': '指定影像的敘述性標題。',
'Specify the bed type of this unit.': '指定此單位的床位類型。',
'Specify the minimum sustainability in weeks or days.': '指定最短可持續週數或天數。',
'Specify the number of available sets': '指定可用組數',
'Specify the number of available units (adult doses)': '指定可用單位數(成人劑量)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': '指定乳酸林格氏液或同等溶液的可用單位數(公升)',
'Specify the number of sets needed per 24h': '指定每 24 小時所需的組數',
'Specify the number of units (adult doses) needed per 24h': '指定每 24 小時所需的單位數(成人劑量)',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': '指定每 24 小時所需的乳酸林格氏液或同等溶液單位數(公升)',
'Spherical Mercator?': '球面麥卡托?',
'Spreadsheet Importer': '匯入試算表',
'Spreadsheet uploaded': '已上傳試算表',
'Staff & Volunteers': '人員和志願者',
'Staff 2': '員工二',
'Staff Details': '人員明細',
'Staff ID': '人員ID',
'Staff Member Details': '人員成員詳細資料',
'Staff Members': '人員成員',
'Staff Record': '人員記錄',
'Staff Type Details': '員工類型詳細資料',
'Staff Type added': '人員新增類型',
'Staff Type deleted': '人員刪除類型',
'Staff Type updated': '人員更新類型',
'Staff Types': '員工類型',
'Staff added': '新增人員',
'Staff and Volunteers': '人員和志愿者',
'Staff deleted': '人員刪除',
'Staff member added': '新增人員',
'Staff present and caring for residents': '有人員在場照護居民',
'Staff updated': '更新人員',
'Staff': '人員',
'Staff2': '員工 2',
'Staffing': '人員配置',
'Stairs': '階梯',
'Start Date': '開始日期',
'Start date': '開始日期 (start date)',
'Start of Period': '期間開始',
'Start using your OpenID': '開始使用您的 OpenID',
'State': '省 (縣)',
'Stationery': '文具',
'Status Report': '狀態報告',
'Status Update': '狀態更新',
'Status Updated': '狀態更新',
'Status added': '新增狀態',
'Status deleted': '刪除狀態',
'Status of clinical operation of the facility.': '設施臨床作業的狀態。',
'Status of general operation of the facility.': '設施一般作業的狀態。',
'Status of morgue capacity.': '停屍間容量的狀態。',
'Status of operations of the emergency department of this hospital.': '此醫院急診部門的作業狀態。',
'Status of security procedures/access restrictions in the hospital.': '醫院內安全程序/存取限制的狀態。',
'Status of the operating rooms of this hospital.': '此醫院手術室的作業狀態。',
'Status updated': '狀態更新',
'Status': '狀態',
'Steel frame': '鋼框架',
'Stolen': '已遭竊',
'Storage Bin Details': '貯存箱詳細資料',
'Storage Bin Number': '貯存箱號碼',
'Storage Bin Type Details': '貯存箱類型詳細資料',
'Storage Bin Type added': '已新增貯存箱類型',
'Storage Bin Type deleted': '已刪除貯存箱類型',
'Storage Bin Type updated': '已更新貯存箱類型',
'Storage Bin Type': '貯存箱類型',
'Storage Bin Types': '貯存箱類型',
'Storage Bin added': '已新增貯存箱',
'Storage Bin deleted': '已刪除貯存箱',
'Storage Bin updated': '已更新貯存箱',
'Storage Bin': '貯存箱',
'Storage Bins': '貯存箱',
'Storage Location Details': '儲存體位置詳細資料',
'Storage Location ID': '儲存體位置 ID',
'Storage Location Name': '儲存體位置名稱',
'Storage Location added': '已新增儲存體位置',
'Storage Location deleted': '已刪除儲存體位置',
'Storage Location updated': '已更新儲存體位置',
'Storage Location': '儲存體位置',
'Storage Locations': '儲存體位置',
'Store spreadsheets in the Eden database': '將試算表儲存於 Eden 資料庫',
'Storeys at and above ground level': '地面及以上樓層數',
'Storm Force Wind': '暴風',
'Storm Surge': '風暴潮',
'Street (continued)': '街道(續)',
'Street Address': '地址',
'Street': '街道',
'Strong Wind': '強風',
'Structural Hazards': '結構性危害',
'Structural': '結構性',
'Style Field': '樣式欄位',
'Style Values': '樣式值',
'Sub Category': '子種類',
'Sub-type': '子類型',
'SubType': '子類型',
'Subject': '主旨',
'Submission successful - please wait': '提交成功-請稍候',
'Submission successful - please wait...': '提交成功-請稍候。',
'Submit New (full form)': '提交新的(完整表單)',
'Submit New (triage)': '提交新的(檢傷分類)',
'Submit New': '提交新的',
'Submit a request for recovery': '提交復原要求',
'Submit new Level 1 assessment (full form)': '提交新的第一級評估(完整表單)',
'Submit new Level 1 assessment (triage)': '提交新的第一級評估(檢傷分類)',
'Submit new Level 2 assessment': '提交新的第二級評估',
'Submit': '確認送出',
'Submitting information about the individual such as identification numbers, physical appearance, last seen location, status, etc': '提交個人相關資訊,例如識別號碼、外貌特徵、最後目擊地點、狀態等',
'Subscription Details': '訂閱詳細資料',
'Subscription added': '已新增訂閱',
'Subscription deleted': '刪除訂閱',
'Subscription updated': '更新訂閱',
'Subscriptions': '訂閱',
'Subsector Details': '子部門詳細資料',
'Subsector added': '已新增子部門',
'Subsector deleted': '已刪除子部門',
'Subsector updated': '已更新子部門',
'Subsector': '子部門',
'Subsistence Cost': '生活費用',
'Suburb': '郊區',
'Sufficient care/assistance for chronically ill': '慢性病患者獲得足夠的照護/協助',
'Suggest not changing this field unless you know what you are doing.': '除非您清楚自己在做什麼,否則建議不要變更此欄位。',
'Summary by Administration Level': '依行政層級摘要',
'Summary': '摘要',
'Sunday': '星期日',
'Supervisor': '監督者',
'Supply Chain Management': '供應鏈管理',
'Support Request': '支援要求',
'Support Requests': '支援要求',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': '藉由協助群組建立排序清單,支援大型危機管理專家群組的決策。',
'Sure you want to delete this object?': '確定要刪除嗎?',
'Survey Answer Details': '調查回答詳細資料',
'Survey Answer added': '調查回答新增',
'Survey Answer deleted': '調查回答刪除',
'Survey Answer updated': '調查回答更新',
'Survey Answer': '調查回答',
'Survey Module': '調查模組',
'Survey Name': '意見調查名稱',
'Survey Question Details': '調查問題詳細資料',
'Survey Question Display Name': '調查問題顯示名稱',
'Survey Question added': '調查問題添加',
'Survey Question deleted': '調查問題刪除',
'Survey Question updated': '調查問題更新',
'Survey Question': '調查問題',
'Survey Section Details': '調查區段詳細資料',
'Survey Section Display Name': '調查區段顯示名稱',
'Survey Section added': '調查區段新增',
'Survey Section deleted': '調查刪除區段',
'Survey Section updated': '調查區段更新',
'Survey Section': '意見調查區段',
'Survey Series Details': '調查系列詳細資料',
'Survey Series Name': '調查系列名稱',
'Survey Series added': '調查系列新增',
'Survey Series deleted': '調查系列刪除',
'Survey Series updated': '調查系列更新',
'Survey Series': '調查系列',
'Survey Template Details': '意見調查範本詳細資料',
'Survey Template added': '已新增意見調查範本',
'Survey Template deleted': '已刪除意見調查範本',
'Survey Template updated': '已更新意見調查範本',
'Survey Template': '意見調查範本',
'Survey Templates': '意見調查範本',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': '開啟此選項可在開發期間使用個別的 CSS/Javascript 檔案進行診斷。',
'Symbology': '符號系統',
'Sync Conflicts': '同步冲突',
'Sync History': '同步歷程',
'Sync Now': '立即同步',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': '同步夥伴是您想與之同步資訊的實例或對等端(SahanaEden、SahanaAgasti、Ushahidi 等)。按一下右側連結,前往可新增、搜尋及修改同步夥伴的頁面。',
'Sync Partners': '同步伙伴',
'Sync Password': '同步密碼',
'Sync Policy': '同步原則',
'Sync Pools are groups of peers (SahanaEden & SahanaAgasti instances) willing to sync with each other. You can subscribe to different groups, define new groups and dicsover the existing ones. Click the link on the right to go to Sync Pools page.': '同步儲存區是願意彼此同步的對等端群組(SahanaEden 與 SahanaAgasti 實例)。您可以訂閱不同的群組、定義新群組及探索現有群組。按一下右側連結前往同步儲存區頁面。',
'Sync Pools': '同步儲存區',
'Sync Schedule': '同步排程',
'Sync Schedules': '同步排程',
'Sync Settings updated': '已更新同步設定',
'Sync Settings': '同步設定',
'Sync Username': '同步使用者名稱',
'Sync process already started on': '同步程序已啟動於',
'Synchronisation - Sync Now': '同步化-立即同步',
'Synchronisation History': '同步化歷程',
'Synchronisation': '同步化',
'Synchronization Conflicts': '同步化冲突',
'Synchronization Details': '同步化詳細資料',
'Synchronization History': '同步化歷程',
'Synchronization Peers': '同步化對等',
'Synchronization Settings': '同步化設定',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the automatic synchronization feature of SahanaEden': '同步化可讓您與他人共用資料,並以其他對等端的最新資料更新您自己的資料庫。本頁提供如何使用 SahanaEden 自動同步功能的資訊',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': '同步化可讓您與他人共用資料,並以其他對等端的最新資料更新您自己的資料庫。本頁提供如何使用 Sahana Eden 同步功能的資訊',
'Synchronization not configured': '未配置同步化',
'Synchronization not configured.': '未配置同步化。',
'Synchronization settings updated': '同步化設定更新',
'Synchronization': '同步化',
'Syncronisation History': '同步歷程',
'Syncronisation Schedules': '同步排程',
'System allows the General Public to Report Incidents & have these Tracked.': '本系統可讓一般民眾通報事件並加以追蹤。',
'System allows the tracking & discovery of Items stored in Locations.': '本系統可追蹤及查詢儲存於各地點的項目。',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': '本系統是一個中央線上資料庫,所有救援組織、救援人員、政府人員及安置營地可在此協調援助的供給與需求。它讓使用者能有效且有效率地配置可用資源以滿足需求。',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': '本系統追蹤所有在災區工作的志工。它不僅記錄志工活動的地點,也記錄其在各地區提供之服務範圍的資訊。',
'Table name': '表格名稱',
'Tags': '標籤',
'Take shelter in place or per <instruction>': '就地避難,或依 <instruction> 指示避難',
'Task Details': '作業詳細資料',
'Task List': '作業清單',
'Task Status': '作業狀態',
'Task added': '新增作業',
'Task deleted': '作業已刪除',
'Task status': '作業狀態',
'Task updated': '作業已更新',
'Tasks': '作業',
'Team Description': '團隊說明',
'Team Details': '團隊詳細資料',
'Team Head': '團隊負責人',
'Team ID': '團隊 ID',
'Team Id': '團隊 ID',
'Team Leader': '團隊領導人',
'Team Member added': '新增團隊成員',
'Team Members': '團隊成員',
'Team Name': '團隊名稱',
'Team Type': '團隊類型',
'Team added': '新增團隊',
'Team deleted': '已刪除團隊',
'Team updated': '更新團隊',
'Team': '團隊',
'Teams': '團隊',
'Technical testing only, all recipients disregard': '僅為技術測試,所有收件者請忽略',
'Telecommunications': '電信',
'Telephone': '電話',
'Telephony': '電話系統',
'Temp folder %s not writable - unable to apply theme!': '暫存資料夾 %s 無法寫入-無法套用布景主題!',
'Template file %s not readable - unable to apply theme!': '範本檔案 %s 無法讀取-無法套用布景主題!',
'Templates': '範本',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': '國家第五級行政區劃的名稱(例如投票區或郵遞區號分區)。此層級不常使用。',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': '國家第四級行政區劃的名稱(例如村、鄰里或選區)。',
'Term for the primary within-country administrative division (e.g. State or Province).': '國家第一級行政區劃的名稱(例如州或省)。',
'Term for the secondary within-country administrative division (e.g. District or County).': '國家第二級行政區劃的名稱(例如地區或縣)。',
'Term for the third-level within-country administrative division (e.g. City or Town).': '國家第三級行政區劃的名稱(例如市或鎮)。',
'Term for the top-level administrative division (i.e. Country).': '最上層行政區劃的名稱(即國家)。',
'Territorial Authority': '地方管轄機關',
'Terrorism': '恐怖主義',
'Tertiary Server (Optional)': '第三伺服器(選用)',
'Test Results': '測試結果',
'Text Colour for Text blocks': '文字區塊的文字顏色',
'Text Direction': '文字方向',
'Text before each Text Field (One per line)': '每個文字欄位前的文字(每行一個)',
'Text in Message': '訊息中的文字',
'Text in Message:': '訊息文字:',
'Text': '文字',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': '感謝您驗證您的電子郵件。您的使用者帳戶仍待系統管理者 (%s) 核准。帳戶啟用後,您將收到電子郵件通知。',
'Thanks for your assistance': '感謝您的協助',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': "The query is a condition like db.table1.field1=='value'. Something like db.table1.field1 == db.table2.field2 results in a SQL JOIN.",
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': "The query is a condition like db.table1.field1=='value'. Something like db.table1.field1==db.table2.field2 results in a SQL JOIN.",
'The Area which this Site is located within.': '此站點所在的區域。',
'The Assessments module allows field workers to send in assessments.': '評估模組可讓現場工作人員傳送評估資料。',
'The Author of this Document (optional)': '此文件的作者(選填)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': '建築物評估模組可評估建築物安全,例如地震後。',
'The Camp this Request is from': '提出此要求的營地',
'The Camp this person is checking into.': '此人員入住的營地。',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '人員/群組的目前位置,可為概略位置(供報告用)或精確位置(供地圖顯示用)。輸入幾個字元以搜尋可用位置。',
'The District for this Report.': '此報告所屬的地區。',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': '核准要求傳送至的電子郵件地址(通常為群組信箱而非個人)。若此欄位空白,則網域相符的要求會自動核准。',
'The Group whose members can edit data in this record.': '其成員可編輯此記錄資料的群組。',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': '事件通報系統可讓一般民眾通報事件並加以追蹤。',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': '此站點的位置,可為概略位置(供報告用)或精確位置(供地圖顯示用)。',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '人員的來處位置,可為概略位置(供報告用)或精確位置(供地圖顯示用)。輸入幾個字元以搜尋可用位置。',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '人員的去處位置,可為概略位置(供報告用)或精確位置(供地圖顯示用)。輸入幾個字元以搜尋可用位置。',
'The Media Library provides a catalogue of digital media.': '媒體庫提供數位媒體的型錄。',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': '傳訊模組是 Sahana 系統的主要通訊中心。它用於在災難發生前、期間及之後,以簡訊及電子郵件向各群組及個人傳送警示及/或訊息。',
'The Office this record is associated with.': '與此記錄相關聯的辦公室。',
'The Organization Registry keeps track of all the relief organizations working in the area.': '組織登錄追蹤所有在該地區工作的救援組織。',
'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '組織登錄追蹤所有在災區工作的救援組織。它不僅記錄這些組織活動的地點,也記錄其在各地區提供之專案範圍的資訊。',
'The Organization this record is associated with.': '與此記錄相關聯的組織。',
'The Organization which is funding this Activity.': '資助此活動的組織。',
'The Person currently filling this Role.': '目前擔任此角色的人員。',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': '專案追蹤模組可建立活動,以填補需求評估中的缺口。',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': '快速評估模組儲存由專業組織完成的結構化報告。',
'The Request this record is associated with.': '與此記錄相關聯的要求。',
'The Role this person plays within this Office/Project.': '此人員在此辦公室/專案中擔任的角色。',
'The Role this person plays within this hospital.': '此人員在此醫院中擔任的角色。',
'The Role to which this Role reports.': '此角色回報的上層角色。',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '庇護所登錄追蹤所有庇護所並儲存其基本資料。它與其他模組協作,以追蹤與庇護所相關的人員、可用服務等。',
'The Shelter this Request is from (optional).': '提出此要求的庇護所(選填)。',
'The Shelter this Request is from': '提出此要求的庇護所',
'The Shelter this person is checking into.': '此人員入住的庇護所。',
'The Source this information came from.': '此資訊的來源。',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': '您想透過地圖存取其圖層的 WMS 服務之 GetCapabilities URL。',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': '您想透過地圖瀏覽器面板使用其圖層的網路地圖服務 (WMS) 之 GetCapabilities 頁面 URL。',
'The URL of your web gateway without the post parameters': '您的網頁閘道 URL,不含 POST 參數',
'The URL to access the service.': '存取該服務的 URL。',
'The Unique Identifier (UUID) as assigned to this facility by the government.': '政府指派給此設施的唯一識別碼 (UUID)。',
'The area is': '區域是',
'The attribute which is used for the title of popups.': '用於蹦現視窗標題的屬性。',
'The attribute within the KML which is used for the title of popups.': 'KML 中用於蹦現視窗標題的屬性。',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'KML 中用於蹦現視窗內文的屬性。(屬性之間以空格分隔)',
'The body height (crown to heel) in cm.': '身高(頭頂至腳跟),以公分為單位。',
'The category of the Item.': '項目的種類。',
'The contact person for this organization.': '此組織的聯絡人。',
'The country the person usually lives in.': '聯絡人日常居住的國家.',
'The default Organization for whom this person is acting.': '此人員預設代表的組織。',
'The default Organization for whom you are acting.': '您預設代表的組織。',
'The default policy for data import from this peer.': '自此對等端匯入資料的預設原則。',
'The descriptive name of the peer.': '對等端的敘述性名稱。',
'The duplicate record will be deleted': '重复的記錄會被刪除',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': '輸入的單位連結至此單位。例如:若您輸入 m(公尺),則選擇公里(如果存在)並輸入 0.001 作為乘數。',
'The first or only name of the person (mandatory).': '聯絡人的名字 (必填).',
'The following modules are available': '下列模組可用',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'URL 格式為 http://your/web/map/service?service=WMS&request=GetCapabilities,其中 your/web/map/service 代表 WMS 的 URL 路徑。',
'The hospital this record is associated with.': '與此記錄相關聯的醫院。',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': '此項目被指定送往特定專案、人口、村落,或其他捐贈指定用途(例如補助代碼)。',
'The language to use for notifications.': '通知所用的語言。',
'The language you wish the site to be displayed in.': '您希望網站顯示時使用的語言。',
'The last known location of the missing person before disappearance.': '失蹤者失蹤前最後已知的位置。',
'The last known location of the missing person.': '失蹤者最後已知的位置。',
'The length is': '長度是',
'The list of Brands are maintained by the Administrators.': '品牌清單由管理者維護。',
'The list of Catalogs are maintained by the Administrators.': '型錄清單由管理者維護。',
'The list of Item categories are maintained by the Administrators.': '項目類別清單由管理者維護。',
'The map will be displayed initially with this latitude at the center.': '地圖初始顯示時以此緯度為中心。',
'The map will be displayed initially with this longitude at the center.': '地圖初始顯示時以此經度為中心。',
'The minimum number of features to form a cluster.': '形成叢集所需的最少圖徵數。',
'The name to be used when calling for or directly addressing the person (optional).': '致電或直呼聯絡人時所用的名字 (非必填).',
'The next screen will allow you to detail the number of people here & their needs.': '下一個畫面可讓你詳細記載這裡的人數以及他們的需要',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': '下一個畫面可讓您輸入詳細的項目及數量清單(如適用)...',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': '替代項目的測量單位數量,等於該項目的一個測量單位',
'The number of pixels apart that features need to be before they are clustered.': '圖徵形成叢集前需相距的像素數。',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': '可見地圖周圍要下載的圖磚數。0 表示第一頁載入較快;數字越高,後續平移越快。',
'The person at the location who is reporting this incident (optional)': '在該位置通報此事件的人員(選填)',
'The person reporting about the missing person.': '通報失蹤者相關資訊的人員。',
'The person reporting the missing person.': '通報失蹤者的人員。',
'The post variable containing the phone number': '含電話號碼的 POST 變數',
'The post variable on the URL used for sending messages': '傳送訊息所用 URL 的 POST 變數',
'The post variables other than the ones containing the message and the phone number': '除含訊息及電話號碼者以外的 POST 變數',
'The request this record is associated with.': '與此記錄相關聯的要求。',
'The scanned copy of this document.': '此文件的掃描副本。',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': '數據機連接的序列埠-Linux 上為 /dev/ttyUSB0 等,Windows 上為 com1、com2 等',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': '伺服器未及時收到其為完成瀏覽器要求而存取之另一台伺服器的回應。',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': '伺服器自其為完成瀏覽器要求而存取之另一台伺服器收到不正確的回應。',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': '「簡易」原則允許匿名使用者讀取、註冊使用者編輯。「完整」安全原則允許管理者對個別表格或記錄設定權限-請參閱 models/zzz.py。',
'The site where this position is based.': '此職位所在的站點。',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': '負責設施的人員可提出協助要求。可針對這些要求做出承諾,但要求會保持開啟,直到要求者確認要求已完成。',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': '該事件不再構成威脅或疑慮,任何後續行動於 <instruction> 中說明',
'The time at which the Event started.': '事件開始的時間。',
'The title of the WMS Browser panel in the Tools panel.': '工具面板中 WMS 瀏覽器面板的標題。',
'The token associated with this application on': '與此應用程式相關聯的記號,位於',
'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': '對等端的唯一識別碼。若對等端不是 Sahana Eden 實例請留空,屆時將自動指派。',
'The unique identifier which identifies this instance to other instances.': '向其他實例識別此實例的唯一識別碼。',
'The way in which an item is normally distributed': '項目通常的配送方式',
'The weight in kg.': '重量,以公斤為單位。',
'The': '此',
'Theme Details': '佈景主題詳細資料',
'Theme added': '新增布景主題',
'Theme deleted': '刪除布景主題',
'Theme updated': '更新布景主題',
'Theme': '佈景主題',
'Themes': '佈景主題',
'There are errors': '有錯誤',
'There are insufficient items in the Inventory to send this shipment': '庫存中的項目不足,無法送出此出貨',
'There are multiple records at this location': '此位置有多筆記錄',
'There are not sufficient items in the Inventory to send this shipment': '庫存中的項目不足,無法送出此出貨',
'There are several ways which you can use to select the Location.': '有幾種方法可用來選取位置。',
'There is no Sahana account associated with that OpenID. Would you like to create one?': '沒有Sahana账户相關的OpenID。 您要建立一個嗎?',
'There is no address for this person yet. Add new address.': '此人員尚無地址。請新增地址。',
'There was a problem, sorry, please try again later.': '很抱歉,發生問題,請稍後再試。',
'These are settings for Inbound Mail.': '這些是內送郵件的設定。',
'These are the Incident Categories visible to normal End-Users': '這些是一般使用者可見的事件類別',
'These are the default settings for all users. To change settings just for you, click': '這些是所有使用者的預設設定。若只想變更您自己的設定,請按一下',
'These need to be added in Decimal Degrees.': '這些需以十進位度數輸入。',
'They': '他們',
'This Group has no Members yet': '此群組尚無成員',
'This Team has no Members yet': '此團隊尚無成員',
'This appears to be a duplicate of': '這似乎重複於',
'This email address is already in use': '這個email已經被使用',
'This file already exists on the server as': '這个檔案已存在于伺服器上為',
'This form allows the administrator to remove a duplicate location by 1st updating all references to it by a different location.': '此表單可讓管理者移除重複的位置:先將所有參照更新為另一個位置。',
'This form allows the administrator to remove a duplicate location.': '此表單可讓管理者移除重複的位置。',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': '此層級仍在建構中時適用。此層級完成後,可將其設為False以防止意外修改。',
'This is the way to transfer data between machines as it maintains referential integrity.': '這是在機器之間傳送資料的方式,因為它能維護參照完整性。',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': '這是在機器之間傳送資料的方式,因為它能維護參照完整性...重複的資料應先手動移除!',
'This level is not open for editing.': '此層級未開放編輯。',
'This might be due to a temporary overloading or maintenance of the server.': '這可能是由于暫時超載或維護的伺服器。',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': '此模組可讓庫存項目在各設施的庫存之間進行要求與運送。',
'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': '這個單元可管理事件,不論事前計畫(如預演)或事件發生時。你可以安排適當的資源,如人力、物資、設備等,使其能更容易被動員',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': '此模組可讓您為演習與事件規劃情境。您可以配置適當的資源(人力、資產與設施),以便輕鬆動員。',
'This module assists the management of fatalities and the identification of the deceased.': '此模組協助管理罹難者及辨識死者身分。',
'This page provides you with information about how to use the automatic synchronization feature of Sahana': '此頁面提供如何使用Sahana自動同步功能的相關資訊',
'This page shows you logs of past syncs. Click on the link below to go to this page.': '此頁面顯示過往同步的日誌。請按一下下面的鏈結跳至此頁面。',
'This screen allows you to upload a collection of photos to the server.': '此畫面可讓您將一組照片上傳至伺服器。',
'This setting can only be controlled by the Administrator.': '此設定只能由管理者控制。',
'This shipment has already been received.': '貨物已收到。',
'This shipment has already been sent.': '貨物已送出。',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': '未收到貨物-尚未取消,因為仍然可以編輯。',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': '此出貨尚未传送其尚未取消,因為仍然可以編輯。',
'This shipment will be confirmed as received.': '這會在確認出貨為已接收。',
'Thursday': '星期四',
'Ticket Details': '問題單詳細內容',
'Ticket ID': '問題單 ID',
'Ticket added': '新增問題單',
'Ticket deleted': '已刪除問題單',
'Ticket updated': '已更新問題單',
'Ticket': '問題單',
'Ticketing Module': '問題單模組',
'Tickets': '問題單',
'Tilt-up concrete': '傾立式混凝土',
'Timber frame': '木構架',
'Time Stamp': '時間戳記',
'Time at which data was exchanged.': '資料交換的時間。',
'Time needed to collect water': '取水所需的時間',
'Time of Request': '要求時間',
'Timeline Report': '時間軸報告',
'Timeline': '時間軸',
'Timestamp': '時間戳記',
'Title to show for the Web Map Service panel in the Tools panel.': '「工具」面板中網頁地圖服務面板顯示的標題。',
'Title': '標題',
'To Location': '終點位置',
'To Organization': '目標組織',
'To Person': '目標人員',
'To Site': '目標場所',
'To begin the sync process, click the button on the right =>': '開始同步程序,請按一下右邊的按鈕=>',
'To begin the sync process, click this button =>': '開始同步程序,請按一下這个按鈕=>',
'To create a personal map configuration, click ': '若要建立個人化地圖設定,請點選 ',
'To create a personal map configuration, click': '若要建立個人化地圖設定,請按一下',
'To delete': '要刪除',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': '若要編輯OpenStreetMap,您必須在models/000_config.py中編輯OpenStreetMap設定',
'To search by job title, enter any portion of the title. You may use % as wildcard.': '若要依職稱搜尋,請輸入職稱的任何部分。可使用%作為萬用字元。',
'To submit a new job, use the': '若要提交新工作,請使用',
'To variable': '到變數',
'To': '至',
'Tools': '工具',
'Total # of Beneficiaries Reached': '實際受益人總數',
'Total # of Target Beneficiaries': '目標受益人總數',
'Total # of households of site visited': '訪視地點的家戶總數',
'Total Beds': '總床位數',
'Total Beneficiaries': '總受益人',
'Total Cost per Megabyte': '每MB成本總計',
'Total Cost per Minute': '每分鐘的總成本',
'Total Households': '總家庭',
'Total Monthly Cost': '每月成本總計',
'Total Monthly Cost:': '每月成本總計:',
'Total Monthly': '每月總計',
'Total No of Affectees (Including Students, Teachers & Others)': '受影響者總數(包括學生、教師及其他)',
'Total No of Female Affectees (Including Students, Teachers & Others)': '女性受影響者總數(包括學生、教師及其他)',
'Total No of Male Affectees (Including Students, Teachers & Others)': '男性受影響者總數(包括學生、教師及其他)',
'Total No of Students (Primary To Higher Secondary) in the Total Affectees': '受影響者總數中的學生人數(小學至高中)',
'Total No of Teachers & Other Govt Servants in the Total Affectees': '受影響者總數中的教師及其他公務人員人數',
'Total One-time Costs': '一次性成本總計',
'Total Persons': '總人員',
'Total Recurring Costs': '總循環成本',
'Total Unit Cost': '總單位成本',
'Total Unit Cost:': '總單位成本:',
'Total Units': '單位總數',
'Total gross floor area (square meters)': '總樓地板面積(平方公尺)',
'Total number of beds in this hospital. Automatically updated from daily reports.': '此醫院的總床位數,會從每日報告自動更新。',
'Total number of houses in the area': '該地區的房屋總數',
'Total number of schools in affected area': '受影響地區的學校總數',
'Total population of site visited': '訪視地點的總人口',
'Total': '總計',
'Totals for Budget:': '預算的總計:',
'Totals for Bundle:': '組合包總計:',
'Totals for Kit:': '套件總計:',
'Tourist Group': '觀光團',
'Town': '鄉鎮',
'Traces internally displaced people (IDPs) and their needs': '追蹤境內流離失所者(IDP)及其需求',
'Tracing': '追蹤',
'Track Details': '追蹤詳細資料',
'Track deleted': '已刪除追蹤',
'Track updated': '已更新追蹤',
'Track uploaded': '已上傳追蹤',
'Track with this Person?': '要追蹤此人員嗎?',
'Track': '追蹤',
'Tracking of Projects, Activities and Tasks': '追蹤專案、活動與任務',
'Tracking of basic information on the location, facilities and size of the Shelters': '追蹤避難所的位置、設施與規模等基本資訊',
'Tracks requests for aid and matches them against donors who have pledged aid': '追蹤援助要求,並與已承諾援助的捐助者進行媒合',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': '追蹤避難所內受災者的位置、分佈、容量與組成',
'Tracks': '磁軌',
'Traffic Report': '交通報告',
'Training Course Catalog': '訓練課程型錄',
'Training Details': '訓練詳細資料',
'Training added': '新增訓練',
'Training deleted': '刪除訓練',
'Training updated': '訓練更新',
'Training': '訓練',
'Trainings': '訓練',
'Transit Status': '運送狀態',
'Transit': '運輸',
'Transit. Status': '運送狀態',
'Transition Effect': '轉變的效果',
'Transparent?': '透明?',
'Transport': '傳輸',
'Transportation assistance, Rank': '交通工具輔助,等級',
'Trauma Center': '創傷中心',
'Travel Cost': '旅行成本',
'Treatments': '治療',
'Tree': '樹狀結構',
'Tropical Storm': '暴雨熱帶',
'Tropo Messaging Token': 'Tropo傳訊記號',
'Tropo Settings': 'Tropo設定',
'Tropo Voice Token': 'Tropo語音記號',
'Tropo settings updated': '已更新Tropo設定',
'Truck': '卡車',
'Try checking the URL for errors, maybe it was mistyped.': '嘗試檢查的URL錯誤,可能是輸入錯誤。',
'Try hitting refresh/reload button or trying the URL from the address bar again.': "嘗試按重新整理/載入按鈕或試着的URL從'網址'列。",
'Try refreshing the page or hitting the back button on your browser.': '請嘗試重新整理頁面,或按"上一頁"按鈕的瀏览器。',
'Tsunami': '海嘯',
'Tuesday': '星期二',
'Twitter ID or #hashtag': 'Twitter ID或#hashtag',
'Twitter Settings': 'Twitter設定',
'Twitter': '推特',
'Type of Construction': '建構類型',
'Type of cause': '原因類型',
'Type of place for defecation': '如廁地點類型',
'Type of water source before the disaster': '災難前的水源類型',
'Type': '類型',
'Type:': '類型:',
'Types of health services available': '可用的醫療服務類型',
'Types of water storage containers available': '可用的儲水容器類型',
'Types': '類型',
'URL of the Ushahidi instance': 'Ushahidi實例的網址',
'URL': '網址',
'UTC Offset': '世界標準時間時差',
'UUID of foreign Sahana server': '外部Sahana伺服器的UUID',
'Un-Repairable': '無法修復',
'Unable to parse CSV file!': '無法剖析CSV檔!',
'Unidentified': '未識別',
'Union Council': '聯合議會',
'Unit Bed Capacity': '單位床位容量',
'Unit Cost': '單位成本',
'Unit Details': '單元詳細資料',
'Unit Name': '單元名稱',
'Unit Set': '單元設定',
'Unit Short Code for e.g. m for meter.': '單位簡碼,例如公尺為m。',
'Unit added': '新增單元',
'Unit deleted': '刪除單元',
'Unit of Measure': '測量單位',
'Unit updated': '單元更新',
'Unit': '裝置',
'Units of Measure': '測量單位',
'Units': '單元',
'Unknown Peer': '不明的同層級',
'Unknown type of facility': '不明類型的機能',
'Unknown': '不明',
'Unresolved Conflicts': '尚未解决的冲突',
'Unselect to disable the modem': '取消選取以停用數據機',
'Unsent': '未傳送',
'Unsupported data format!': '不受支援的資料格式!',
'Unsupported method!': '不受支援的方法!',
'Update Activity Report': '更新活動報告',
'Update Cholera Treatment Capability Information': '更新Cholera處理功能資訊',
'Update Details': '更新詳細資料',
'Update Import Job': '更新匯入工作',
'Update Request': '更新要求',
'Update Service Profile': '更新服務設定檔',
'Update Status': '更新狀態',
'Update Task Status': '更新作業狀態',
'Update Unit': '更新單元',
'Update added': '新增更新',
'Update deleted': '刪除更新',
'Update if Master': '若為主要則更新',
'Update if Newer': '若較新則更新',
'Update updated': '更新更新',
'Update your current ordered list': '更新您現行排序清單',
'Update': '更新項目',
'Update/Master': '更新/主要',
'Update/Newer': '更新/新',
'Updated By': '更新者',
'Updates': '更新項目',
'Upload Photos': '上传照片',
'Upload Spreadsheet': '上传試算表',
'Upload Track': '上传跟踪',
'Upload a Spreadsheet': '上传一个試算表',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': '上传影像檔案(BMP, GIF, JPEG或PNG),最大 300x300像素!',
'Upload an image file here.': '上传影像檔案在這裡。',
'Upload an image, such as a photo': '上传影像,例如圖片',
'Upload': '上傳',
'Urban Fire': '都市火災',
'Urban area': '都市區域',
'Urdu': '烏都文',
'Urgent': '緊急',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用(...)&(...)的, (...)|(...)的或,且~(...)的建置更复雜的查詢。',
'Use Geocoder for address lookups?': '使用地理編碼程式的位址查閱嗎?',
'Use default from feature class': '使用特徵類別的預設值',
'Use default': '使用預設值',
'Use these links to download data that is currently in the database.': '使用這些鏈結來下載資料中的現行資料庫。',
'Use this link to review the situation.': '請利用這個鏈結來檢視情勢。',
'Use this space to add a description about the Bin Type.': '使用此空間新增關於儲位類型的說明。',
'Use this space to add a description about the site location.': '使用此空間新增關於站點位置的說明。',
'Use this space to add a description about the warehouse/site.': '使用此空間新增關於倉儲/站點的說明。',
'Use this space to add additional comments and notes about the Site/Warehouse.': '使用此空間新增關於站點/倉儲的其他註解與附註。',
'Use this to indicate that the person has been found.': '使用此項來表示此人已被找到。',
'Used by IRS & Assess': '由IRS與評估模組使用',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': '用於onHover工具提示與叢集彈出視窗,以區分不同類型。',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': '用於建置onHover工具提示;第1個欄位也用於叢集彈出視窗,以區分不同記錄。',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': '用來檢查的緯度輸入的位置是否合理。 可用來過濾列出的資源的位置。',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': '用來檢查輸入的經度位置是否合理。 可用來過濾列出的資源的位置。',
'Used to import data from spreadsheets into the database': '用於將試算表的資料匯入資料庫',
'Used within Inventory Management, Request Management and Asset Management': '用於庫存管理、要求管理與資產管理',
'User %(id)s Logged-in': '使用者 %(id)s 已登入',
'User %(id)s Logged-out': '使用者 %(id)s 已登出',
'User %(id)s Profile updated': '使用者 %(id)s 設定檔更新',
'User %(id)s Registered': '使用者 %(id)s 登錄',
'User Account has been Disabled': '使用者账户已停用',
'User Details': '使用者詳細資料',
'User ID': '使用者 ID',
'User Management': '使用者管理',
'User Profile': '使用者設定檔',
'User Requests': '使用者要求',
'User Updated': '已更新使用者',
'User added': '已新增使用者',
'User already has this role': '使用者已具有此角色',
'User deleted': '已刪除使用者',
'User updated': '已更新使用者',
'User': '使用者',
'Username & Password': '使用者名稱與密碼',
'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.': '在同層級進行鑑別所用的使用者名稱。注意,僅支援HTTP基本鑑別。',
'Username': '使用者名稱',
'Users can collaboratively add markers of what is occuring.': '使用者可以協作新增標記,標示正在發生的事。',
'Users removed': '移除使用者',
'Users': '使用者',
'Uses the REST Query Format defined in': '使用REST查詢格式,其定義位於',
'Usual food sources in the area': '該地區的一般食物來源',
'Utilities': '公用程式',
'Utility, telecommunication, other non-transport infrastructure': '公用事業、電信及其他非運輸基礎設施',
'Value': '值',
'Various Reporting functionalities': '各種報告功能',
'Vehicle Crime': '車輛犯罪',
'Vehicle Types': '車輛類型',
'Vehicle': '車輛',
'Vendor': '供應商',
'Verification Status': '驗證狀態',
'Verified': '已驗證',
'Verified?': '驗證?',
'Verify Password': '驗證密碼',
'Verify password': '驗證密碼',
'Version': '版本',
'Very Good': '非常良好',
'Very High': '非常高',
'View & Edit Pledges': '檢視與編輯認捐',
'View Alerts received using either Email or SMS': '檢視透過電子郵件或簡訊收到的通知',
'View All': '全部檢視',
'View Error Tickets': '檢視錯誤問題單',
'View Fullscreen Map': '檢視全螢幕地圖',
'View Image': '檢視影像',
'View Items': '檢視項目',
'View Map': '檢視地圖',
'View On Map': '在地圖上檢視',
'View Outbox': '檢視寄件匣',
'View Picture': '檢視圖片',
'View Requests & Pledge Aid': '檢視要求與認捐援助',
'View Requests for Aid': '檢視援助要求',
'View Settings': '檢視設定',
'View Situation Map': '檢視情勢地圖',
'View Tickets': '檢視摘記卷',
'View and/or update their details': '檢視及/或更新其詳細資料',
'View or update the status of a hospital.': '檢視或更新醫院的狀態。',
'View pending requests and pledge support.': '檢視擱置中的要求並認捐支援。',
'View the hospitals on a map.': '在地圖上檢視醫院。',
'View/Edit the Database directly': '直接檢視/編輯資料庫',
'Village Leader': '村落領導者',
'Village': '村落',
'Visible?': '可見嗎?',
'Visual Recognition': '視覺化辨識',
'Volcanic Ash Cloud': '火山灰雲',
'Volcanic Event': '火山事件',
'Volume (m3)': '體積(立方公尺)',
'Volume - Fluids': '容積 - 液體',
'Volume - Solids': '容積 - 固體',
'Volume Capacity': '容積容量',
'Volume/Dimensions': '體積/尺寸',
'Volunteer Availability': '志工可用時間',
'Volunteer Data': '志工資料',
'Volunteer Details': '志工詳細資料',
'Volunteer Information': '志工資訊',
'Volunteer Management': '志工管理',
'Volunteer Project': '志願者專案',
'Volunteer Record': '志工記錄',
'Volunteer Registration': '志願者登錄',
'Volunteer Registrations': '志工登記',
'Volunteer Request': '志工要求',
'Volunteer added': '新增志工',
'Volunteer availability added': '已新增志工可用時間',
'Volunteer availability deleted': '已刪除志工可用時間',
'Volunteer availability updated': '已更新志工可用時間',
'Volunteer deleted': '志工刪除',
'Volunteer details updated': '更新志願者詳細資料',
'Volunteer location': '志願者位置',
'Volunteer registration added': '已新增志願者登記',
'Volunteer registration deleted': '已刪除志願者登記',
'Volunteer registration updated': '已更新志願者登記',
'Volunteers were notified!': '已通知志工!',
'Volunteers': '志工',
'Vote': '表決',
'Votes': '表決',
'WASH': '水與環境衛生(WASH)',
'WFP Assessments': '世界糧食計劃組織的評估',
'WMS Browser Name': 'WMS瀏览器名稱',
'WMS Browser URL': 'WMS瀏览器URL',
'Walking Only': '僅步行',
'Walking time to the health service': '步行至醫療服務的時間',
'Wall or other structural damage': '牆面或其他結构損壞',
'Warehouse Details': '詳細資料倉儲',
'Warehouse Management': '倉儲管理',
'Warehouse added': '新增倉儲',
'Warehouse deleted': '刪除倉庫',
'Warehouse updated': '更新倉儲',
'Warehouse': '倉儲',
'Warehouse/Sites Registry': '倉庫/站點登錄',
'Warehouses': '倉庫',
'Water Sanitation Hygiene': '水與環境衛生',
'Water collection': '取水',
'Water gallon': '水加侖',
'Water storage containers available for HH': '家戶可用的儲水容器',
'Water storage containers in households': '家戶中的儲水容器',
'Water storage containers sufficient per HH': '每戶儲水容器是否足夠',
'Water supply': '水供應',
'Water': '水',
'Way Bill(s)': '託運單',
'We have tried': '我們已經嘗試',
'Web Map Service Browser Name': '網頁地圖服務瀏覽器名稱',
'Web Map Service Browser URL': '網頁地圖服務瀏覽器URL',
'Website': '網站',
'Wednesday': '星期三',
'Weekly': '每週',
'Weight (kg)': '重量 (公斤)',
'Weight': '重量',
'Welcome to the Sahana Eden Disaster Management System': '歡迎使用 Sahana Eden 救災管理系統',
'Welcome to the Sahana Portal at': '歡迎使用Sahana入口網站',
'Well-Known Text': '常用文字',
'Were basic medical supplies available for health services prior to the disaster?': '災難前醫療服務是否有基本醫療用品可用?',
'Were breast milk substitutes used prior to the disaster?': '災難前是否使用母乳替代品?',
'Were there cases of malnutrition in this area prior to the disaster?': '在這個災難之前,這個地區有營養不良的個案嗎?',
'Were there health services functioning for the community prior to the disaster?': '在這個災難之前,這個地區有正常運作的健康醫療服務嗎?',
'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': '緊急事件前是否有任何微量營養素營養不良疾病爆發的報告或證據?',
'What are the factors affecting school attendance?': '有哪些因素會影響到學校上課的出席率?',
'What are your main sources of cash to restart your business?': '什麼是你重新創業的主要現金資本來源?',
'What are your main sources of income now?': '什麼是你現階段主要收入來源?',
'What do you spend most of your income on now?': '現階段你大部份收入花用在什麼地方?',
'What food stocks exist? (main dishes)': '現有哪些食物存糧?(主食)',
'What food stocks exist? (side dishes)': '現有哪些食物存糧?(副食)',
'What is the estimated total number of people in all of these institutions?': '把這些機構的人全部加起來, 大約總共是多少人?',
'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': '什麼是你每天主要的清潔用水來源 (例如: 清洗, 煮飯, 洗澡)?',
'What is your major source of drinking water?': '你的飲用水主要是來自於那裡?',
'What type of latrines are available in the village/IDP centre/Camp?': '什麼類型的廁所在村/ IDP中心/營可用?',
'What type of salvage material can be used from destroyed houses?': '什麼類型的打撈材料從被摧毀的房屋可用?',
'What type of salvage material can be used from destroyed schools?': '什麼類型的打撈材料從被摧毀的學校可用?',
'What types of health problems do children currently have?': '孩子目前有什麼類型的衛生問題?',
'What types of health problems do people currently have?': '人目前有什麼類型的衛生問題?',
'What types of health services are still functioning in the affected area?': '在受影響的地區什麼類型的衛生服務仍然運作?',
'What types of household water storage containers are available?': '有什麼類型的家庭儲水容器可用?',
'What were your main sources of income before the disaster?': '災難之前什麼是你的主要收入來源?',
'Wheat': '小麥',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': '當顯示的地圖聚焦於一組點位時,地圖會縮放至恰好涵蓋這些點的區域。此值會在點位之外額外加上一小段距離;若無此值,最外圍的點會落在邊界框上,可能無法顯示。',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': '當顯示的地圖聚焦於一組點位時,地圖會縮放至恰好涵蓋這些點的區域。此值為所顯示的區域設定最小寬度與高度(以度為單位);若無此值,只顯示單一點的地圖將不會顯示該點周圍的任何範圍。地圖顯示後,仍可依需要縮放。',
'When reports were entered': '當報告已輸入',
'Where are the alternative places for studying?': '另類的學習地方在哪裡?',
'Where are the separated children originally from?': '失散兒童原本來自何處?',
'Where do the majority of people defecate?': '大多數人在何處如廁?',
'Where have the children been sent?': '兒童被送往何處?',
'Where is solid waste disposed in the village/camp?': '村落/營地的固體廢棄物棄置於何處?',
'Whereabouts': '下落',
'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': '這是否為Sahana Eden, Sahana Agasti, Ushahidi或其他實例。',
'Which API function was called, it can only have two values: getdata refers to data export operation and putdata refers to data import operation.': '所呼叫的API函數只能有兩個值:getdata指資料匯出作業,putdata指資料匯入作業。',
'Who is doing what and where': '誰在何處做什麼',
'Who usually collects water for the family?': '家中通常由誰取水?',
'Width (m)': '寬度(M)',
'Width': '寬度',
'Wild Fire': '野火',
'Wind Chill': '風寒',
'Window frame': '窗框',
'Winter Storm': '冬季風暴',
'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': '在不提出任何人名或暗示任何人的情況下,你是否知道自從災難發生後, 有沒有任何對婦女或女孩暴力的事件?',
'Women of Child Bearing Age': '育齡婦女',
'Women participating in coping activities': '參與應對活動的婦女',
'Women who are Pregnant or in Labour': '懷孕或分娩中的婦女',
'Womens Focus Groups': '婦女焦點團體',
'Wooden plank': '木板',
'Wooden poles': '木桿',
'Working hours end': '工作時間結束',
'Working hours start': '工作時間開始',
'Working or other to provide money/food': '工作或以其他方式取得金錢/食物',
'Would you like to display the photos on the map?': '您要在地圖上顯示照片嗎?',
'X-Ray': 'X光',
'YES': '是',
'Year built': '建置年份',
'Year of Manufacture': '年的制造',
'Yellow': '黃色',
'Yes': '是',
'You are a recovery team?': '您是復原團隊嗎?',
'You are attempting to delete your own account - are you sure you want to proceed?': '您正在嘗試刪除您自己的账户-您確定要繼續進行嗎?',
'You are currently reported missing!': '您目前被通報失蹤!',
'You can add information about your organization here. It is the information which other servers can read about you.': '您可以在這裡新增組織的相關資訊,也就是其他伺服器能讀取到的關於您的資訊。',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': '您可以變更的配置同步化模組設定"區段中。 此配置包括您的UUID (唯一識別號碼),同步化排程, Beacon服務等等。 按一下下列鏈結以跳至"同步設定"頁面。',
'You can click on the map below to select the Lat/Lon fields': '您可以按一下下面的地圖來選取緯度/經度欄位',
'You can click on the map below to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': '您可以按一下下面的地圖來選取緯度/經度欄位。經度為西-東(橫向);緯度為北-南(縱向)。緯度在赤道為零,北半球為正,南半球為負。經度在本初子午線(格林威治)為零,向東跨越歐亞為正;向西跨越大西洋與美洲為負。需以十進位度數輸入。',
'You can click on the map below to select the Lat/Lon fields:': '您可以按一下下面的地圖來選取緯度/經度欄位:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': '您可以按一下地圖來選取緯度/經度欄位。經度為西-東(橫向);緯度為北-南(縱向)。緯度在赤道為零,北半球為正,南半球為負。經度在本初子午線(格林威治)為零,向東跨越歐亞為正;向西跨越大西洋與美洲為負。需以十進位度數輸入。',
'You can select the Draw tool (': '您可以選取繪制工具',
'You can select the Draw tool': '您可以選取繪制工具',
'You can set the modem settings for SMS here.': '您可以在這裡設定SMS的數據機設定。',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': '您可以使用轉換工具,從GPS座標或度/分/秒格式進行轉換。',
'You do no have permission to cancel this received shipment.': '您沒有許可權來取消此接收出貨。',
'You do no have permission to cancel this sent shipment.': '您沒有許可權來取消此传送出貨。',
'You do no have permission to make this commitment.': '您沒有許可權來進行此承諾。',
'You do no have permission to receive this shipment.': '您沒有許可權來接收此出貨。',
'You do no have permission to send this shipment.': '您沒有許可權來传送這份出貨。',
'You do not have permission for any facility to make a commitment.': '您沒有任何設施的許可權可做出承諾。',
'You do not have permission for any facility to make a request.': '您沒有任何設施的許可權可提出要求。',
'You do not have permission for any site to add an inventory item.': '您沒有任何站點的許可權可新增庫存項目。',
'You do not have permission for any site to receive a shipment.': '您沒有任何站點的許可權可接收出貨。',
'You do not have permission for any site to send a shipment.': '您沒有任何站點的許可權可傳送出貨。',
'You do not have permission to cancel this received shipment.': '您沒有許可權來取消此接收出貨。',
'You do not have permission to cancel this sent shipment.': '您沒有許可權來取消此传送出貨。',
'You do not have permission to make this commitment.': '您沒有權限可讓此承諾。',
'You do not have permission to receive this shipment.': '您沒有許可權來接收這个出貨。',
'You do not have permission to send a shipment from this site.': '您沒有許可權從此站點傳送出貨。',
'You do not have permission to send this shipment.': '您沒有許可權來传送這份出貨。',
'You have a personal map configuration. To change your personal configuration, click': '您有一个个人對映配置。 若要變更您的个人配置,請按一下',
'You have found a dead body?': '您發現了屍體嗎?',
'You must be logged in to register volunteers.': '您必須登入,才能登錄志工。',
'You must be logged in to report persons missing or found.': '您必須登入,才能通報人員失蹤或尋獲。',
'You must provide a series id to proceed.': '您必须提供一个系列ID來繼續。',
'You should edit Twitter settings in models/000_config.py': '您應該在models/000_config.py中編輯Twitter設定',
'Your action is required. Please approve user %s asap:': '需要您採取行動。請盡快核准使用者%s:',
'Your action is required. Please approve user': '需要您採取行動。請核准使用者',
'Your current ordered list of solution items is shown below. You can change it by voting again.': '您目前排序的解決方案項目清單如下。您可以再次投票加以變更。',
'Your post was added successfully.': '已順利新增您的文章。',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': '您的系統已被指派一個唯一識別碼(UUID),您附近的其他電腦可用它來識別您。若要檢視您的UUID,請至同步化->同步設定。您也可以在該頁面查看其他設定。',
'Your unique identification key. It is a 16 character word (aka string). Other servers in your organization will recognize you from this.': '您的唯一識別金鑰,為16個字元的字串。您組織中的其他伺服器會藉此識別您。',
'ZIP/Postcode': 'ZIP/郵遞區號',
'Zero Hour': '零時',
'Zeroconf Description': 'ZeroConf說明',
'Zeroconf Port': 'ZeroConf埠',
'Zinc roof': '鋅板屋頂',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': '放大:按一下在對映中,或使用滑鼠左鍵并拖動滑鼠來建立一个矩形',
'Zoom Levels': '縮放級別',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': '縮小:按一下在對映中,或使用滑鼠左鍵并拖動滑鼠來建立一个矩形',
'Zoom to maximum map extent': '縮放至地圖最大範圍',
'Zoom': '縮放',
'accepted': '接受',
'act': '行動',
'active': '作用中',
'added': '已新增',
'all records': '所有記錄',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': '允許的預算來開發基于員工和設備成本,包括任何管理成本。',
'allows for creation and management of surveys to assess the damage following a natural disaster.': '可讓您建立和管理的調查來評估損壞之后,自然災難。',
'an individual/team to do in 1-2 days': '個人/團隊可在1-2天內完成',
'approved': '已核准',
'are mandatory and must be filled': '的欄位為必填',
'assigned': '已指派',
'average': '平均值',
'black': '黑色',
'blond': '金色',
'blue': '藍色',
'brown': '棕色',
'by': '根據',
'c/o Name': '轉交人姓名(c/o)',
'can be used to extract data from spreadsheets and put them into database tables.': '可用來從試算表擷取資料並放入資料庫表格。',
'check all': '勾選「全部」',
'click for more details': '按一下以取得更多詳細資料',
'collateral event': '抵押品事件',
'completed': '已完成',
'confirmed': '已確認',
'consider': '考量',
'criminal intent': '犯罪目的',
'critical': '重要',
'crud': 'CRUD',
'curly': '捲髮',
'currently registered': '目前登錄',
'daily': '每日',
'data uploaded': '上传資料',
'database %s select': '資料庫 %s 選取',
'database': '資料庫',
'db': 'DB',
'deceased': '死亡',
'deferred': '延遲',
'delete all checked': '刪除所有勾選項目',
'delete': '刪除',
'deleted': '已刪除',
'denied': '已拒絕',
'description': '說明',
'design': '設計',
'diseased': '死者',
'displaced': '移離',
'divorced': '離婚',
'done!': '完成!',
'duplicate': '重複',
'edit': '編輯',
'editor': '編輯者',
'eg. gas, electricity, water': '例如: 瓦斯,電力,水',
'embedded': '內嵌的',
'enclosed area': '括住區域',
'export as csv file': '匯出為CSV檔案',
'fat': '肥胖',
'feedback': '讀者意見',
'female': '女性',
'final report': '最終報告',
'flush latrine with septic tank': '附化糞池的沖水廁所',
'follow-up assessment': '后續評量',
'food_sources': '食物來源',
'form data': '表單資料',
'found': '找到',
'from Twitter': '從Twitter',
'full': '滿載',
'getting': '取得',
'green': '綠色',
'grey': '灰色',
'here': '這裡',
'high': '高',
'highly critical': '高度重要',
'hourly': '每小時',
'households': '家庭',
'how to deal with duplicate data found between your machine and that particular sahana instance.': '如何處理在您的機器與該sahana實例之間發現的重複資料。',
'http://openid.net/get-an-openid/start-using-your-openid/': 'https://myid.tw/profile/help',
'human error': '人為錯誤',
'identified': '識別',
'ignore': '忽略',
'immediately': '立即',
'improvement': '改進',
'in Deg Min Sec format': '以度分秒格式',
'in GPS format': '以GPS格式',
'inactive': '非作用中',
'initial assessment': '起始評量',
'injured': '受傷',
'insert new %s': '插入新的 %s',
'insert new': '插入新建項目',
'invalid request': '無效要求',
'invalid': '無效',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': '是一個中央線上儲存庫,可儲存所有災民與其家庭的資訊,特別是已確認的罹難者、撤離者與流離失所者。可記錄姓名、年齡、聯絡電話、身分證號碼、安置地點等詳細資料,並可將個人的照片與指紋資料上傳至系統。為求效率與方便,也可以依群組登錄人員。',
'is an online bulletin board of missing and found people. It captures information about the people missing and found, as well as information of the person seeking them, increasing the chances of people finding each other. For example if two members of a family unit is looking for the head of the family, we can use this data at least to connect those two family members.': '是失蹤與尋獲人員的線上佈告欄。它記錄失蹤與尋獲人員的資訊,以及尋人者的資訊,提高人們互相找到彼此的機會。例如一個家庭中有兩名成員都在尋找戶長,我們至少可以利用這些資料讓這兩名家庭成員取得聯繫。',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': '預計由數個子模組組成,協同運作,為組織提供管理救援與專案物資的完整功能。這包括收料系統、倉儲管理系統、物資追蹤、供應鏈管理、車隊管理、採購、財務追蹤以及其他資產與資源管理功能',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': '追蹤所有傳入的問題單,以便分類並轉送到適當的地方處理。',
'keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '追蹤在災區工作的所有救援組織。它不僅記錄各組織活動的地點,也記錄其在各地區提供的各項專案資訊。',
'leave empty to detach account': '保留空白以分離账户',
'legend URL': '圖註URL',
'light': '光亮',
'locations': '位置',
'login': '登入',
'long': 'Long',
'long>12cm': '超過12cm',
'low': '低',
'male': '男性',
'manual': '手動',
'married': '已婚',
'medium': '中',
'menu item': '功能表項目',
'meters': '公尺',
'missing': '失蹤',
'module allows the site administrator to configure various options.': '模組可讓網站管理者設定各種選項。',
'module helps monitoring the status of hospitals.': '模組協助監視醫院的狀態。',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': '模組提供一種機制,利用線上地圖(GIS)協作呈現災害發展的概況。',
'more': '更多模組',
'n/a': '不適用',
'natural hazard': '自然危害',
'never': '絕不',
'new record inserted': '插入新記錄',
'new': '新建',
'next 100 rows': '下100个橫列',
'no': '無影響',
'non-critical': '非重要',
'none': '無',
'normal': '正常',
'not accessible - no cached version available!': '無法存取-無快取可用版本!',
'not accessible - using cached version from': '無法存取 - 使用快取版本,版本來自',
'not specified': '未指定',
'not writable - unable to cache GeoRSS layers!': '無法寫入-無法快取GeoRSS層!',
'not writable - unable to cache KML layers!': '無法寫入-無法快取KML層!',
'num Zoom Levels': '縮放級別數',
'obsolete': '已作廢',
'on': '開啟',
'once': '一次',
'open defecation': '露天如廁',
'operational intent': '作業意圖',
'optional': '選用',
'or import from csv file': '或從CSV檔案匯入',
'other': '其他',
'over one hour': '超過一小時',
'people': '個人',
'piece': '片段',
'pit latrine': '坑式廁所',
'pit': '坑',
'postponed': '延遲',
'preliminary template or draft, not actionable in its current form': '初步範本或草稿,現行形式尚不可執行',
'previous 100 rows': '前100个橫列',
'primary incident': '主要事件',
'provides a catalogue of digital media.': '提供數位媒體的型錄。',
'record does not exist': '記錄不存在',
'record id': '記錄 ID',
'records deleted': '已刪除的記錄',
'red': '紅色',
'refresh': '重新整理',
'reported': '已報告',
'reports successfully imported.': '報告已順利匯入。',
'representation of the Polygon/Line.': '多邊形/線的表示法。',
'retired': '已撤回',
'retry': '重試',
'review': '檢閱',
'river': '河流',
'secondary effect': '次要效果',
'see comment': '請参閱註解',
'selected': '已選取',
'separated from family': '與家人失散',
'separated': '分居',
'short': 'Short',
'sides': '側邊',
'sign-up now': '現在註冊',
'simple': '簡單',
'single': '單身',
'skills': '技術',
'slim': 'Slim',
'specify': '指定',
'staff members': '人員成員',
'staff': '人員',
'state location': '位置狀態',
'state': '狀況',
'status': '狀態',
'straight': '直線',
'suffered financial losses': '蒙受財務損失',
'supports nurses in the field to assess the situation, report on their activities and keep oversight.': '支援第一線的護理人員評估情勢、回報其活動並持續監督。',
'table': '表格',
'tall': '高',
'technical failure': '技術失敗',
'times and it is still not working. We give in. Sorry.': '次,仍無法運作。我們放棄了,很抱歉。',
'to access the system': '來使用系統功能',
'total': '總計',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '追蹤所有避難所並儲存其基本資料。它與其他模組協作,追蹤與避難所相關的人員、可用服務等。',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': '執行中的Python沒有tweepy模組 - 需要安裝才能支援非Tropo的Twitter功能!',
'unable to parse csv file': '無法剖析CSV檔',
'unapproved': '未核准',
'uncheck all': '取消全選',
'unidentified': '未識別',
'uninhabitable = foundation and structure destroyed': '無法居住 = 地基與結構全毀',
'unknown': '不明',
'unspecified': '未指定的',
'unverified': '未驗證',
'updated': '已更新',
'updates only': '僅更新',
'urgent': '緊急',
'verified': '已驗證',
'view': '視圖',
'volunteer': '志工',
'volunteers': '志工',
'wavy': '波浪形',
'weekly': '每週',
'white': '白色',
'wider area, longer term, usually contain multiple Activities': '範圍較廣、期間較長,通常包含多個活動',
'widowed': '鰥居',
'window': '視窗',
'windows broken, cracks in walls, roof slightly damaged': '窗戶破裂、牆面有裂縫、屋頂輕微受損',
'wish': '希望',
'within human habitat': '在人類居住的範圍',
'xlwt module not available within the running Python - this needs installing for XLS output!': '執行中的Python沒有xlwt模組 - 需要安裝才能輸出XLS!',
'yes': '是',
}
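A minimal sketch, not part of the original file, of how a bare dict literal like the language file above can be loaded and queried; load_language and translate are hypothetical helpers (web2py itself consumes these files through its T() translator):

import ast
import io

def load_language(path):
    # A web2py language file is a single {...} dict literal, so
    # ast.literal_eval can parse it without executing any code.
    with io.open(path, encoding='utf-8') as f:
        return ast.literal_eval(f.read())

def translate(table, msgid):
    # Untranslated strings fall back to the English msgid itself.
    return table.get(msgid, msgid)

# Hypothetical usage:
#   table = load_language('languages/zh-tw.py')
#   translate(table, 'Tools')  # -> '工具'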
|
bobrock/eden
|
languages/zh-tw.py
|
Python
|
mit
| 338,458
|
[
"VisIt"
] |
e8a2d4ef1d298c9d7f3c031adc5ee9be9943728daf5dd618b020cf4b14c902be
|
import unittest
from test import test_support
from itertools import *
from weakref import proxy
from decimal import Decimal
from fractions import Fraction
import sys
import operator
import random
import copy
import pickle
from functools import reduce
maxsize = test_support.MAX_Py_ssize_t
minsize = -maxsize-1
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def next(self):
raise StopIteration
def take(n, seq):
'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
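# Illustrative examples of the helpers above (not part of the original
# test module):
#   take(3, count())  -> [0, 1, 2]   (safely samples an infinite iterator)
#   prod([2, 3, 4])   -> 24
#   fact(5)           -> 120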
class TestBasicOps(unittest.TestCase):
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
self.assertEqual(list(combinations('abc', 32)), []) # r > n
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = range(r)
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) // fact(r) // fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
self.assertEqual(result, list(combinations3(values, r))) # matches third pure python version
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
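# The tuple-reuse check above (and its analogues below) exercises a
# CPython-specific optimization: when the caller drops each result tuple
# before advancing, the iterator recycles the same tuple object, so
# map(id, ...) over the bare iterator sees a single id, while list()
# first materializes distinct tuples.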
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
self.assertEqual(list(cwr('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number items returned: (n+r-1)! / r! / (n-1)! when n>0
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) // fact(r) // fact(n-1)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
cwr = combinations_with_replacement
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = range(n)
cycles = range(n, n-r, -1)
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) // fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
@test_support.impl_detail("tuple reuse is CPython specific")
def test_permutations_tuple_reuse(self):
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) // fact(r) // fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) // fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) // fact(r) // fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, filter(set(cwr).__contains__, perm)) # comb: perm that is a cwr
self.assertEqual(comb, filter(set(perm).__contains__, cwr)) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
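# Worked instance of the size identities above (illustrative): for
# s='ABC' (n=3) and r=2, len(prod)=9=n**r, len(cwr)=6=4!/2!/2!,
# len(perm)=6=3!/1!, and len(comb)=3=3!/2!/1!.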
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
self.assertRaises(TypeError, compress, range(6), None, None) # too many args
def test_count(self):
self.assertEqual(zip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(zip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, zip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(list(islice(count(maxsize-5), 10)), range(maxsize-5, maxsize+5))
self.assertEqual(list(islice(count(-maxsize-5), 10)), range(-maxsize-5, -maxsize+5))
c = count(3)
self.assertEqual(repr(c), 'count(3)')
c.next()
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
c.next()
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(c.next(), -8)
for i in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 10, sys.maxint-5, sys.maxint+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i)).replace('L', '')
r2 = 'count(%r)'.__mod__(i).replace('L', '')
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, sys.maxint-5, sys.maxint+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
self.assertEqual(next(pickle.loads(pickle.dumps(c))), value)
def test_count_with_stride(self):
self.assertEqual(zip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(zip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(zip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertEqual(zip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(zip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
c.next()
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
c.next()
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
c.next()
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
for i in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 10, sys.maxint-5, sys.maxint+5):
for j in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 1, 10, sys.maxint-5, sys.maxint+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i, j)).replace('L', '')
if j == 1:
r2 = ('count(%r)' % i).replace('L', '')
else:
r2 = ('count(%r, %r)' % (i, j)).replace('L', '')
self.assertEqual(r1, r2)
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, lambda r:r[0]):
for ik, ig in groupby(g, lambda r:r[2]):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, lambda r:r[0])]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.next failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.next failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.next failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __cmp__ failure
class DummyCmp:
def __cmp__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __cmp__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __cmp__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_ifilter(self):
self.assertEqual(list(ifilter(isEven, range(6))), [0,2,4])
self.assertEqual(list(ifilter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(ifilter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, ifilter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, ifilter)
self.assertRaises(TypeError, ifilter, lambda x:x)
self.assertRaises(TypeError, ifilter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, ifilter, isEven, 3)
self.assertRaises(TypeError, ifilter(range(6), range(6)).next)
def test_ifilterfalse(self):
self.assertEqual(list(ifilterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(ifilterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(ifilterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, ifilterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, ifilterfalse)
self.assertRaises(TypeError, ifilterfalse, lambda x:x)
self.assertRaises(TypeError, ifilterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, ifilterfalse, isEven, 3)
self.assertRaises(TypeError, ifilterfalse(range(6), range(6)).next)
def test_izip(self):
ans = [(x,y) for x, y in izip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(izip('abc', range(6))), zip('abc', range(6)))
self.assertEqual(list(izip('abcdef', range(3))), zip('abcdef', range(3)))
self.assertEqual(take(3,izip('abcdef', count())), zip('abcdef', range(3)))
self.assertEqual(list(izip('abcdef')), zip('abcdef'))
self.assertEqual(list(izip()), zip())
self.assertRaises(TypeError, izip, 3)
self.assertRaises(TypeError, izip, range(3), 3)
self.assertEqual([tuple(list(pair)) for pair in izip('abc', 'def')],
zip('abc', 'def'))
self.assertEqual([pair for pair in izip('abc', 'def')],
zip('abc', 'def'))
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_izip_tuple_reuse(self):
ids = map(id, izip('abc', 'def'))
self.assertEqual(min(ids), max(ids))
ids = map(id, list(izip('abc', 'def')))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_iziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
# target = map(None, *args) <- this raises a py3k warning
# this is the replacement:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(izip_longest(*args)), target)
self.assertEqual(list(izip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(izip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,izip_longest('abcdef', count())), zip('abcdef', range(3))) # take 3 from infinite input
self.assertEqual(list(izip_longest()), zip())
self.assertEqual(list(izip_longest([])), zip([]))
self.assertEqual(list(izip_longest('abcdef')), zip('abcdef'))
self.assertEqual(list(izip_longest('abc', 'defg', **{})),
zip(list('abc') + [None], 'defg')) # empty keyword dict
self.assertRaises(TypeError, izip_longest, 3)
self.assertRaises(TypeError, izip_longest, range(3), 3)
for stmt in [
"izip_longest('abc', fv=1)",
"izip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
self.fail('Did not raise TypeError in: ' + stmt)
self.assertEqual([tuple(list(pair)) for pair in izip_longest('abc', 'def')],
zip('abc', 'def'))
self.assertEqual([pair for pair in izip_longest('abc', 'def')],
zip('abc', 'def'))
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_izip_longest_tuple_reuse(self):
ids = map(id, izip_longest('abc', 'def'))
self.assertEqual(min(ids), max(ids))
ids = map(id, list(izip_longest('abc', 'def')))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_bug_7244(self):
class Repeater(object):
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def next(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
# Formerly this code would fail in debug mode
# with Undetected Error and Stop Iteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in izip_longest(r1, r2, fillvalue=0):
with test_support.captured_output('stdout'):
print (i, j)
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be lost
# and StopIteration would stop as expected
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = izip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
def product1(*args, **kwds):
pools = map(tuple, args) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
def product2(*args, **kwds):
'Pure python version used in docs'
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
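        # Sanity-check sketch of the equivalence exercised below (example
        # values chosen for illustration):
        #   list(product('ab', range(2))) == list(product1('ab', range(2)))
        #                                 == list(product2('ab', range(2)))
        #   --> [('a', 0), ('a', 1), ('b', 0), ('b', 1)]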
argtypes = ['', 'abc', '', xrange(0), xrange(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(zip(xrange(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
def test_imap(self):
self.assertEqual(list(imap(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
self.assertEqual(list(imap(None, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(imap(None, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,imap(None, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(imap(operator.pow, [])), [])
self.assertRaises(TypeError, imap)
self.assertRaises(TypeError, imap, operator.neg)
self.assertRaises(TypeError, imap(10, range(5)).next)
self.assertRaises(ValueError, imap(errfunc, [4], [5]).next)
self.assertRaises(TypeError, imap(onearg, [4], [5]).next)
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, izip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, starmap(10, [(4,5)]).next)
self.assertRaises(ValueError, starmap(errfunc, [(4,5)]).next)
self.assertRaises(TypeError, starmap(onearg, [(4,5)]).next)
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(xrange(100), *args)), range(*args))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), ((10, 100, 3))),
((10, 110), ((10, 100))),
((110,), (100,))
]:
self.assertEqual(list(islice(xrange(100), *args)), range(*tgtargs))
# Test stop=None
self.assertEqual(list(islice(xrange(10), None)), range(10))
self.assertEqual(list(islice(xrange(10), None, None)), range(10))
self.assertEqual(list(islice(xrange(10), None, None, None)), range(10))
self.assertEqual(list(islice(xrange(10), 2, None)), range(2, 10))
self.assertEqual(list(islice(xrange(10), 1, None, 2)), range(1, 10, 2))
# Test number of items consumed SF #1171417
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), range(3))
self.assertEqual(list(it), range(3, 10))
# Test invalid arguments
self.assertRaises(TypeError, islice, xrange(10))
self.assertRaises(TypeError, islice, xrange(10), 1, 2, 3, 4)
self.assertRaises(ValueError, islice, xrange(10), -5, 10, 1)
self.assertRaises(ValueError, islice, xrange(10), 1, -5, -1)
self.assertRaises(ValueError, islice, xrange(10), 1, 10, -1)
self.assertRaises(ValueError, islice, xrange(10), 1, 10, 0)
self.assertRaises(ValueError, islice, xrange(10), 'a')
self.assertRaises(ValueError, islice, xrange(10), 'a', 1)
self.assertRaises(ValueError, islice, xrange(10), 1, 'a')
self.assertRaises(ValueError, islice, xrange(10), 'a', 1, 1)
self.assertRaises(ValueError, islice, xrange(10), 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
        # Issue #10323: Leave islice in a predictable state
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, takewhile(10, [(4,5)]).next)
self.assertRaises(ValueError, takewhile(errfunc, [(4,5)]).next)
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, t.next)
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, dropwhile(10, [(4,5)]).next)
self.assertRaises(ValueError, dropwhile(errfunc, [(4,5)]).next)
def test_tee(self):
n = 200
def irange(n):
for i in xrange(n):
yield i
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(zip(a,b), zip(range(n),range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), range(n))
self.assertEqual(list(b), range(n))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in xrange(100):
self.assertEqual(a.next(), i)
del a
self.assertEqual(list(b), range(n))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in xrange(100):
self.assertEqual(a.next(), i)
del b
self.assertEqual(list(a), range(100, n))
for j in xrange(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = its[i].next()
lists[i].append(value)
self.assertEqual(lists[0], range(n))
self.assertEqual(lists[1], range(n))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(xrange(2000), 3)
for i in xrange(100):
self.assertEqual(a.next(), i)
self.assertEqual(list(b), range(2000))
self.assertEqual([c.next(), c.next()], range(2))
self.assertEqual(list(a), range(100,2000))
self.assertEqual(list(c), range(2,2000))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in xrange(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual(map(list, result), [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weak referencable
a, b = tee(xrange(10))
p = proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
def test_StopIteration(self):
self.assertRaises(StopIteration, izip().next)
for f in (chain, cycle, izip, groupby):
self.assertRaises(StopIteration, f([]).next)
self.assertRaises(StopIteration, f(StopNow()).next)
self.assertRaises(StopIteration, islice([], None).next)
self.assertRaises(StopIteration, islice(StopNow(), None).next)
p, q = tee([])
self.assertRaises(StopIteration, p.next)
self.assertRaises(StopIteration, q.next)
p, q = tee(StopNow())
self.assertRaises(StopIteration, p.next)
self.assertRaises(StopIteration, q.next)
self.assertRaises(StopIteration, repeat(None, 0).next)
for f in (ifilter, ifilterfalse, imap, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, f(lambda x:x, []).next)
self.assertRaises(StopIteration, f(lambda x:x, StopNow()).next)
class TestExamples(unittest.TestCase):
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_ifilter(self):
self.assertEqual(list(ifilter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_ifilterfalse(self):
self.assertEqual(list(ifilterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_imap(self):
self.assertEqual(list(imap(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_izip(self):
self.assertEqual(list(izip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_izip_longest(self):
self.assertEqual(list(izip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split()))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split()))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
    def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
iterator.next()
del container, iterator
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
n = 10
keyfunc = lambda x: x
for i, j in groupby(xrange(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_ifilter(self):
a = []
self.makecycle(ifilter(lambda x:True, [a]*2), a)
def test_ifilterfalse(self):
a = []
self.makecycle(ifilterfalse(lambda x:False, a), a)
def test_izip(self):
a = []
self.makecycle(izip([a]*2, [a]*3), a)
def test_izip_longest(self):
a = []
self.makecycle(izip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(izip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_imap(self):
a = []
self.makecycle(imap(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
        3 // 0  # deliberately raises ZeroDivisionError
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def next(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(imap(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, list, compress(N(s), repeat(1)))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, list, cycle(N(s)))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, list, groupby(N(s)))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_ifilter(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilter(isEven, g(s))), filter(isEven, g(s)))
self.assertRaises(TypeError, ifilter, isEven, X(s))
self.assertRaises(TypeError, list, ifilter(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilter(isEven, E(s)))
def test_ifilterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilterfalse(isEven, g(s))), filter(isOdd, g(s)))
self.assertRaises(TypeError, ifilterfalse, isEven, X(s))
self.assertRaises(TypeError, list, ifilterfalse(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilterfalse(isEven, E(s)))
def test_izip(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(izip(g(s))), zip(g(s)))
self.assertEqual(list(izip(g(s), g(s))), zip(g(s), g(s)))
self.assertRaises(TypeError, izip, X(s))
self.assertRaises(TypeError, list, izip(N(s)))
self.assertRaises(ZeroDivisionError, list, izip(E(s)))
def test_iziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(izip_longest(g(s))), zip(g(s)))
self.assertEqual(list(izip_longest(g(s), g(s))), zip(g(s), g(s)))
self.assertRaises(TypeError, izip_longest, X(s))
self.assertRaises(TypeError, list, izip_longest(N(s)))
self.assertRaises(ZeroDivisionError, list, izip_longest(E(s)))
def test_imap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(imap(onearg, g(s))), map(onearg, g(s)))
self.assertEqual(list(imap(operator.pow, g(s), g(s))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, imap, onearg, X(s))
self.assertRaises(TypeError, list, imap(onearg, N(s)))
self.assertRaises(ZeroDivisionError, list, imap(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, list, islice(N(s), 10))
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = zip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, list, starmap(operator.pow, N(ss)))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, list, takewhile(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, list, dropwhile(isOdd, N(s)))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, list, tee(N(s))[0])
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
def test_repeat(self):
from test.test_iterlen import len
self.assertEqual(len(repeat(None, 50)), 50)
self.assertRaises(TypeError, len, repeat(None))
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(z.next())
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = imap(g, items)
z = izip(*[gen]*len(tuple1))
z.next()
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
if x:
raise StopIteration
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
class SubclassWithKwargsTest(unittest.TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
for cls in (repeat, izip, ifilter, ifilterfalse, chain, imap,
starmap, islice, takewhile, dropwhile, cycle, compress):
class Subclass(cls):
def __init__(self, newarg=None, *args):
cls.__init__(self, *args)
try:
Subclass(newarg=1)
except TypeError, err:
# we expect type errors because of wrong argument count
self.assertNotIn("does not take keyword arguments", err.args[0])
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in izip(count(1200), amounts):
... print 'Check %d is for $%.2f' % (checknum, amount)
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in imap(operator.pow, xrange(1,4), repeat(3)):
... print cube
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print name.title()
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.iteritems()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print k, map(itemgetter(0), g)
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in the
# same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print map(operator.itemgetter(1), g)
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return izip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return imap(function, count(start))
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(imap(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(imap(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... for elem in b:
... break
... return izip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return izip_longest(fillvalue=fillvalue, *args)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).next for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return imap(next, imap(itemgetter(1), groupby(iterable, key)))
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> quantify(xrange(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, imap(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
# doctest the examples in the library reference
test_support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
|
ljgabc/lfs
|
usr/lib/python2.7/test/test_itertools.py
|
Python
|
gpl-2.0
| 67,130
|
[
"GULP"
] |
9ef90a39ba7d055300362d5709f69af2a22c9424c614a4a06d171e726725c110
|
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.vasp.outputs import Xdatcar
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.core.trajectory import Trajectory
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
import numpy as np
import os
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class TrajectoryTest(PymatgenTest):
def setUp(self):
xdatcar = Xdatcar(os.path.join(test_dir, "Traj_XDATCAR"))
self.traj = Trajectory.from_file(os.path.join(test_dir, "Traj_XDATCAR"))
self.structures = xdatcar.structures
def test_single_index_slice(self):
self.assertTrue(all([self.traj[i] == self.structures[i] for i in range(0, len(self.structures), 19)]))
def test_slice(self):
sliced_traj = self.traj[2:99:3]
sliced_traj_from_structs = Trajectory.from_structures(self.structures[2:99:3])
        self.assertEqual(len(sliced_traj), len(sliced_traj_from_structs))
        self.assertTrue(all([sliced_traj[i] == sliced_traj_from_structs[i] for i in range(len(sliced_traj))]))
def test_conversion(self):
# Convert to displacements and back. Check structures
self.traj.to_displacements()
self.traj.to_positions()
self.assertTrue(all([struct == self.structures[i] for i, struct in enumerate(self.traj)]))
def test_copy(self):
traj_copy = self.traj.copy()
self.assertTrue(all([i == j for i, j in zip(self.traj, traj_copy)]))
def test_extend(self):
traj = self.traj.copy()
# Case of compatible trajectories
compatible_traj = Trajectory.from_file(os.path.join(test_dir, "Traj_Combine_Test_XDATCAR_1"))
traj.extend(compatible_traj)
full_traj = Trajectory.from_file(os.path.join(test_dir, "Traj_Combine_Test_XDATCAR_Full"))
compatible_success = self._check_traj_equality(self.traj, full_traj)
# Case of incompatible trajectories
traj = self.traj.copy()
incompatible_traj = Trajectory.from_file(os.path.join(test_dir, "Traj_Combine_Test_XDATCAR_2"))
        incompatible_test_success = False
        try:
            traj.extend(incompatible_traj)
        except Exception:
            incompatible_test_success = True
self.assertTrue(compatible_success and incompatible_test_success)
def test_length(self):
self.assertTrue(len(self.traj) == len(self.structures))
def test_displacements(self):
poscar = Poscar.from_file(os.path.join(test_dir, "POSCAR"))
structures = [poscar.structure]
displacements = np.zeros((11, *np.shape(structures[-1].frac_coords)))
for i in range(10):
displacement = np.random.random_sample(np.shape(structures[-1].frac_coords)) / 20
new_coords = displacement + structures[-1].frac_coords
structures.append(Structure(structures[-1].lattice, structures[-1].species, new_coords))
displacements[i+1, :, :] = displacement
traj = Trajectory.from_structures(structures, constant_lattice=True)
traj.to_displacements()
self.assertTrue(np.allclose(traj.frac_coords, displacements))
def test_changing_lattice(self):
structure = self.structures[0]
# Generate structures with different lattices
structures = []
for i in range(10):
new_lattice = np.dot(structure.lattice.matrix, np.diag(1 + np.random.random_sample(3)/20))
temp_struct = structure.copy()
temp_struct.lattice = Lattice(new_lattice)
structures.append(temp_struct)
traj = Trajectory.from_structures(structures, constant_lattice=False)
# Check if lattices were properly stored
self.assertTrue(
all([np.allclose(struct.lattice.matrix, structures[i].lattice.matrix) for i, struct in enumerate(traj)]))
def test_to_from_dict(self):
d = self.traj.as_dict()
traj = Trajectory.from_dict(d)
self.assertEqual(type(traj), Trajectory)
def _check_traj_equality(self, traj_1, traj_2):
if np.sum(np.square(np.subtract(traj_1.lattice, traj_2.lattice))) > 0.0001:
return False
if traj_1.species != traj_2.species:
return False
return all([i == j for i, j in zip(self.traj, traj_2)])
if __name__ == '__main__':
import unittest
unittest.main()
|
dongsenfo/pymatgen
|
pymatgen/core/tests/test_trajectory.py
|
Python
|
mit
| 4,483
|
[
"VASP",
"pymatgen"
] |
f10defcc642837817e653a1c31b14469251b5781bcf28da4e19e68be446240bf
|
"""The idea behind this module is to take the code that is repeated many times in the application and refactor it
so that the following functions can be used multiple times rather using duplicate code."""
import wx, wx.html
from wx.lib.mixins.listctrl import CheckListCtrlMixin, ListCtrlAutoWidthMixin
import MySQLdb, sys
import wx.lib.analogclock as ac
import time, datetime, tempfile
import wx.grid
import textwrap, os, sane
import settings
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch
from wx.html import HtmlEasyPrinting
from pyPdf import PdfFileWriter, PdfFileReader
def buildOneButton(instance, parent, label, handler, sizer=None):
button = wx.Button(parent, -1, label)
instance.Bind(wx.EVT_BUTTON, handler, button)
    if sizer is not None:
        sizer.Add(button, 1, wx.EXPAND|wx.ALL, 3)
return button
def buildOneTextCtrl(instance, label, size, sizer=None):
"""This function stores the newly created text controls in a dictionary for future reference."""
instance.textctrl[label] = wx.TextCtrl(instance, -1, size=(size, -1), name=label)
    if sizer is not None:
        f = wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL)
        l = wx.StaticText(instance, -1, label)
        l.SetFont(f)
        sizer.Add(l)
        sizer.Add((3, -1))
        sizer.Add(instance.textctrl[label])
        sizer.Add((15, -1))
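# Illustrative (hypothetical) use of buildOneTextCtrl; assumes the calling
# frame has already created self.textctrl = {} -- the label doubles as the
# dictionary key for later retrieval:
#   buildOneTextCtrl(self, 'Last Name', 150, sizer=row_sizer)
#   value = self.textctrl['Last Name'].GetValue()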
class CheckListCtrl(wx.ListCtrl, CheckListCtrlMixin, ListCtrlAutoWidthMixin):
def __init__(self, parent):
wx.ListCtrl.__init__(self, parent, -1, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
CheckListCtrlMixin.__init__(self)
ListCtrlAutoWidthMixin.__init__(self)
def buildCheckListCtrl(instance, columns, data):
"""Robin Dunn helped me with a problem where the program was
crashing, segfault, because the columns I was setting did not match up
with the number of columns I was inserting. This stemmed in part from
using a dictionary cursor. Switching to a regular cursor also gave me
a tuple with the results in the same order every time. The dictionary cursor
gives dictionaries which are unordered."""
ctrl = CheckListCtrl(instance)
index = 0
for columnName, columnWidth in columns:
ctrl.InsertColumn(index, columnName, width = columnWidth)
index = index + 1
for i in data:
col = 1
newindex = ctrl.InsertStringItem(sys.maxint, str(i[0]))
for parts in i[1:]:
ctrl.SetStringItem(newindex, col, str(parts))
col = col + 1
#assert col < len(medcolumns) #this is where Robin Dunn solved my problem
if not col < len(columns):
break #need error msg here
return ctrl
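# Illustrative (hypothetical) use of buildCheckListCtrl; per the docstring
# above, each data row must not have more fields than declared columns:
#   columns = [('ID', 50), ('Name', 200), ('DOB', 100)]
#   data = [(1, 'Jane Doe', '1980-01-01'), (2, 'John Roe', '1975-06-15')]
#   ctrl = buildCheckListCtrl(self, columns, data)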
"""class myObjectListView():
def __init__(self, instance, columns, data):
self.ctrl = ObjectListView(instance, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
for columnName, columnWidth in columns:
for i in data:
ctrl.SetColumns(ColumnDefn(columnName, "left", columnWidth, i)"""
def updateList(data, a_list):
a_list.DeleteAllItems()
for i in data:
col = 1
newindex = a_list.InsertStringItem(sys.maxint, str(i[0]))
for parts in i[1:]:
a_list.SetStringItem(newindex, col, str(parts))
col = col + 1
#assert col < len(medcolumns) #this is where Robin Dunn solved my problem
if not col < len(i):
break #need error msg here
def getData(query):
    a = wx.GetApp()
    cursor = a.conn.cursor()
    cursor.execute(query)
    results = cursor.fetchone()
    cursor.close()  # close before returning; code after a return never runs
    return results
def getAllData(query):
    a = wx.GetApp()
    cursor = a.conn.cursor()
    cursor.execute(query)
    results = cursor.fetchall()
    cursor.close()
    return results
def getDictData(query):
    a = wx.GetApp()
    cursor = a.conn.cursor(MySQLdb.cursors.DictCursor)
    cursor.execute(query)
    results = cursor.fetchone()
    cursor.close()
    return results
def getAllDictData(query):
    a = wx.GetApp()
    cursor = a.conn.cursor(MySQLdb.cursors.DictCursor)
    cursor.execute(query)
    results = cursor.fetchall()
    cursor.close()
    return results
def changeData(newData, table, field, patient_ID):
a = wx.GetApp()
cursor = a.conn.cursor()
results = cursor.execute('SELECT * FROM past_history WHERE patient_ID = %s;' % (patient_ID))
if not results:
cursor.execute('INSERT INTO %s SET %s = "%s", patient_ID = %s;' % (table, field, newData, patient_ID))
else:
try:
#for strings (I'm not sure integers will raise error here)
cursor.execute('UPDATE %s SET %s = "%s" WHERE patient_ID = %s;' % (table, field, newData, patient_ID))
except:
#for integers
cursor.execute('UPDATE %s SET %s = %d WHERE patient_ID = %s;' % (table, field, newData, patient_ID))
cursor.close()
def makeClock(parent, sizer):
clock = ac.AnalogClock(parent, size=(80,80), style=wx.RAISED_BORDER,
hoursStyle=ac.TICKS_DECIMAL,
minutesStyle=ac.TICKS_NONE,
clockStyle=ac.SHOW_HOURS_TICKS| \
ac.SHOW_HOURS_HAND| \
ac.SHOW_MINUTES_HAND)
sizer.Add(clock, 0, wx.ALIGN_BOTTOM | wx.ALL, 5)
return clock
def dateToday(t='no'):
if t == 'no':
date = time.strftime("%Y-%m-%d", time.localtime())
elif t == 'sql':
date = time.strftime("%Y-%m-%d %H.%M.%S", time.localtime()) #comes with time in 00.00.00 format
elif t == 'display':
date = time.strftime("%d %b %Y", time.localtime()) #gives 26 Aug 1965 format for display
elif t == 'file format':
date = time.strftime("%Y-%m-%d_%H.%M.%S", time.localtime()) #comes with underscore between date and time
elif t == 'OA':
date = time.strftime("%m/%d/%y", time.localtime()) #Office Ally format for billing
else:
date = time.strftime("%Y-%m-%d %H%M%S", time.localtime()) #comes with time in 000000 format
return date
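# Example outputs for each format (illustrative, for 2014-08-26 13:05:09):
#   dateToday()              -> '2014-08-26'
#   dateToday('sql')         -> '2014-08-26 13.05.09'
#   dateToday('display')     -> '26 Aug 2014'
#   dateToday('file format') -> '2014-08-26_13.05.09'
#   dateToday('OA')          -> '08/26/14'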
def strToDate(string):
date = datetime.datetime.strptime(string, "%Y-%m-%d %H.%M.%S")
return date
def updateData(query):
a = wx.GetApp()
cursor = a.conn.cursor()
cursor.execute(query)
a.conn.commit()
cursor.close()
def valuesUpdateData(query, values):
#this allows binary data to be passed to database
a = wx.GetApp()
cursor = a.conn.cursor()
cursor.execute(query, values)
a.conn.commit()
cursor.close()
class myGrid(wx.grid.Grid):
def __init__(self, parent, ID, PtID, labels, data, hide=[]):
wx.grid.Grid.__init__(self, parent, ID)
self.CreateGrid(len(data), len(labels))
for row in range(len(data)):
for col in range(len(labels)):
self.SetColLabelValue(col, labels[col])
self.SetCellValue(row, col, str(data[row][col]))
self.SetColMinimalAcceptableWidth(0)
self.AutoSize()
self.SetDefaultCellAlignment(wx.ALIGN_CENTER, wx.ALIGN_CENTER)
        for items in hide:  # hide is a list of column indices to collapse
            self.SetColSize(items, 0)
self.EnableGridLines(False)
def notePDF(PtID, text, title='Barron Family Medicine', visit_date=dateToday()):
lt = '%s/EMR_outputs/%s/SOAP_notes/%s.pdf' % (settings.LINUXPATH, PtID, visit_date.replace(' ', '_'))
at = '%s/EMR_outputs/%s/SOAP_notes/%s.pdf' % (settings.APPLEPATH, PtID, visit_date.replace(' ', '_'))
wt = '%s\EMR_outputs\%s\SOAP_notes\%s.pdf' % (settings.WINPATH, PtID, visit_date.replace(' ', '_'))
doc = SimpleDocTemplate(platformText(lt, at, wt))
styles = getSampleStyleSheet()
normal = styles['Normal']
h1 = styles['h1']
story = [Paragraph(title, h1)]
story.append(Spacer(1, 0.2*inch))
story.append(Paragraph("<u>%s %s</u>" % (getName(PtID), visit_date), normal))
story.append(Paragraph("<u>DOB: %s</u>" % (getDOB(PtID)), normal))
story.append(Spacer(1, 0.2*inch))
go_bold = ('cc:', 'Problems:', 'Meds:', 'Allergies:', 'Vitals:', 'A/P:', 'SH:', 'FH:')
for i in go_bold:
text = text.replace(i, '<b>%s</b>' % i, 1)
for line in text.split('\n'):
if line == '':
story.append(Spacer(1, 0.15*inch))
else:
story.append(Paragraph(line, normal))
story.append(Spacer(1, 0.2*inch))
story.append(Paragraph("%s, DOB: %s Visit Date: %s End of note" % (getName(PtID), getDOB(PtID), visit_date), normal))
doc.build(story)
def getName(ptID):
qry = "SELECT firstname, lastname FROM demographics WHERE patient_ID = %s" % ptID
results = getData(qry)
name = results[0] + ' ' + results[1]
return name
def getDOB(ptID):
qry = "SELECT dob FROM demographics WHERE patient_ID = %s" % ptID
results = getData(qry)
return results[0]
class Printer(HtmlEasyPrinting):
def __init__(self):
HtmlEasyPrinting.__init__(self)
def GetHtmlText(self,text):
"Simple conversion of text. Use a more powerful version"
html_text = text.replace('\n\n','<P>')
        html_text = html_text.replace('\n', '<BR>')
#html_text = text.replace('\t', ' ')
return html_text
def Print(self, text, doc_name = ''):
self.SetHeader(doc_name)
self.PrintText(text, doc_name)
def PreviewText(self, text, doc_name = ''):
self.SetHeader(doc_name)
HtmlEasyPrinting.PreviewText(self, text)
def getAge(ptID):
qry = 'SELECT dob FROM demographics WHERE patient_ID = %s;' % ptID
dob = getData(qry)
age = datetime.date.today() - dob[0]
if age.days < 32:
result = '%s day old' % age.days
elif 730 > age.days > 31:
months, days = divmod(age.days, 30)
result = '%s month %s day old' % (months, days)
elif age.days > 729:
years, days = divmod(age.days, 365)
result = '%s year old' % years
else: pass
return result
def getAgeYears(ptID):
qry = 'SELECT dob FROM demographics WHERE patient_ID = %s;' % ptID
dob = getData(qry)
today = datetime.date.today()
try:
birthday = dob[0].replace(year=today.year)
    except ValueError: # raised when birth date is February 29 and the current year is not a leap year
        birthday = dob[0].replace(year=today.year, day=dob[0].day - 1)
if birthday > today:
return today.year - dob[0].year - 1
else:
return today.year - dob[0].year
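# Worked example: for a patient born 1980-03-15, getAgeYears returns 33 on
# 2014-03-14 (birthday not yet reached this year) and 34 on 2014-03-15.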
def getSex(ptID):
qry = 'SELECT sex FROM demographics WHERE patient_ID = %s;' % ptID
sex = getData(qry)
return sex[0]
def wrapper(text, width):
"""
A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (\n). This code from Mike Brown found
at http://code.activestate.com/recipes/148061/.
"""
return reduce(lambda line, word, width=width: '%s%s%s' %
(line,
' \n'[(len(line)-line.rfind('\n')-1
+ len(word.split('\n',1)[0]
) >= width)],
word),
text.split(' ')
)
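# Illustrative call (width of 10 chosen for the example):
#   wrapper('The quick brown fox jumps over the lazy dog', 10)
#   -> 'The quick\nbrown fox\njumps over\nthe lazy\ndog'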
class name_fixer():
def __init__(self, name):
n = name.strip()
self.firstname = ''
self.lastname = ''
#for lastname, firstname or lastname,firstname
if n.count(',') == 1:
np = n.partition(',')
self.firstname = np[2].strip()
self.lastname = np[0].strip()
#for just lastname
elif n.count(' ') == 0 and n.count(',') == 0:
self.lastname = n
#for just firstname if query returns NULL then use name to search for firstnames
        #for firstname lastname
elif n.count(' ') == 1 and n.count(',') == 0:
np = n.partition(' ')
self.firstname = np[0]
self.lastname = np[2]
else:
msg = 'Please provide name as: Lastname, Firstname.'
dlg = wx.MessageDialog(None, msg, "I didn't catch that name",
style=wx.OK, pos=wx.DefaultPosition)
dlg.ShowModal()
dlg.Destroy()
pass
class HTML_Frame(wx.Frame):
def __init__(self, parent, title, html_str=''):
wx.Frame.__init__(self, parent, -1, title, size=(700,900))
html = wx.html.HtmlWindow(self)
html.LoadPage(html_str)
def platformText(ltext, atext, wtext):
if sys.platform == 'linux2':
return ltext
elif sys.platform == 'darwin':
return atext
else:
return wtext
def CMS_column_choice(numColumns, markColumn):
#this function marks the chosen column with an 'x' and inserts tabs to match the number of columns
string = ''
for n in range(numColumns):
if n == markColumn:
string = string + "x" + "\t"
else:
string = string + "\t"
return string
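# Illustrative output -- marking column 2 of a 4-column row yields:
#   CMS_column_choice(4, 2) -> '\t\tx\t\t'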
def Scan(x, y, ptID, pt_dir, file_name, device='hpaio:/usb/Officejet_4500_G510n-z?serial=CN078H70X905HR', mode='simplex'):
sane.init()
d = sane.get_devices()
lt = "%s/EMR_outputs/%s/%s/%s-%s.pdf" % (settings.LINUXPATH, ptID, pt_dir, file_name, dateToday('file format'))
at = "%s/EMR_outputs/%s/%s/%s-%s.pdf" % (settings.LINUXPATH, ptID, pt_dir, file_name, dateToday('file format'))
wt = "%s\EMR_outputs\%s\%s\%s-%s.pdf" % (settings.WINPATH, ptID, pt_dir, file_name, dateToday('file format'))
if mode == 'duplex':
#check which scanner and then opens the HP which I use for scanning ID cards
try: #need try in case there is only one scanner attached
if d[1][1] == 'Hewlett-Packard':
s = sane.open(d[1][0])
else:
s = sane.open(d[0][0])
except:
s = sane.open(d[0][0])
s.mode = 'gray'
s.resolution = 150
s.br_x = x
s.br_y = y
image = s.scan()
wx.MessageBox("Turn doc over in the scanner, then click 'OK'.", "", wx.OK)
t = tempfile.mkstemp(suffix='.pdf')
image.save(t[1])
image2 = s.scan()
t2 = tempfile.mkstemp(suffix='.pdf')
image2.save(t2[1])
inpt = PdfFileReader(open(t[1], 'rb'))
inpt2 = PdfFileReader(open(t2[1], 'rb'))
otpt = PdfFileWriter()
otpt.addPage(inpt.getPage(0))
otpt.addPage(inpt2.getPage(0))
newfile = file(platformText(lt, at, wt), 'wb')
otpt.write(newfile)
s.close()
os.remove(t[1])
os.remove(t2[1])
sane.exit()
elif mode == 'ADF':
if d[1][1] == 'Hewlett-Packard':
s = sane.open(d[1][0])
else:
s = sane.open(d[0][0])
s.mode = 'gray'
s.resolution = 150
s.br_x = x
s.br_y = y
msg = wx.MessageDialog(None, "Would you like an ADF Scan?", "", style=wx.YES_NO)
otpt = PdfFileWriter()
while msg.ShowModal() == wx.ID_YES:
image = s.multi_scan()
t = tempfile.mkstemp(suffix='.pdf')
image.save(t[1])
inpt = PdfFileReader(open(t[1], 'rb'))
otpt.addPage(inpt.getPage(0))
#s.cancel()
newfile = file(platformText(lt, at, wt), 'wb')
otpt.write(newfile)
s.close()
os.remove(t[1])
sane.exit()
else:
try: #need try in case there is only one scanner attached
if d[1][1] == 'Hewlett-Packard':
s = sane.open(d[1][0])
else:
s = sane.open(d[0][0])
except:
s = sane.open(d[0][0])
s.mode = 'gray'
s.resolution = 150
s.br_x = x
s.br_y = y
image = s.scan()
image.save(platformText(lt, at, wt))
s.close()
sane.exit()
def OnNunova(PtID):
"""This function builds a txt file that iMacros uses to load new pt info via their webpage."""
qry = 'SELECT * FROM demographics WHERE patient_ID = %s;' % PtID
dem_data = getDictData(qry)
if dem_data['state'] == 'MO':
dem_data['state'] = 'Missouri'
elif dem_data['state'] == 'IL':
dem_data['state'] = 'Illinois'
else: print 'pass' #I may need error msg here for people that don't live in the MO/IL area
s = '%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s' % (dem_data['firstname'], dem_data['lastname'], dem_data['address'], \
dem_data['city'], dem_data['state'], dem_data['zipcode'], \
dem_data['phonenumber'], dem_data['dob'].strftime("%m/%d/%Y"), \
dem_data['dob'].strftime('%b'), dem_data['dob'].strftime('%d').lstrip('0'), \
dem_data['dob'].strftime('%Y'), dem_data['sex'].capitalize(), PtID)
with open('/home/mb/Dropbox/iMacros/Datasources/dem_data.txt', 'w') as f:
f.write(s)
f.close()
def createDataFile(PtID, data, macro):
'''This function takes patient_ID and a list of data to pull from the demographics table. Saves macro specific
text file for iMacros in the appropriate Dropbox folder. Items in data must be correct names of demographics fields.'''
qry = 'SELECT * FROM demographics WHERE patient_ID = %s;' % PtID
dem_data = getDictData(qry)
s = ''
dem_data['dob'] = dem_data['dob'].strftime("%m,%d,%Y")
if macro == 'medicare_eligibility':
dem_data['firstname'] = dem_data['firstname'][:1]
else: pass
for items in data:
s = s + dem_data[items] + ', '
with open('/home/mb/Dropbox/iMacros/Datasources/%s.txt' % macro, 'w') as f:
f.write(s)
f.close()
def findObjectAttr(obj):
for item in dir(obj):
print item, ": ", getattr(obj, item)
MESSAGES = ''
|
barronmo/gecko_emr
|
EMR_utilities.py
|
Python
|
gpl-2.0
| 16,710
|
[
"ADF",
"VisIt"
] |
40890cb6593bfe8ebe96966b5e67e14666c4e8f1e1543ca05fa48cc03f769300
|
#!/usr/bin/env python
""" load Visit into OSDF using info from data file """
import os
import re
from cutlass.Visit import Visit
import settings
from cutlass_utils import \
load_data, get_parent_node_id, list_tags, format_query, \
write_csv_headers, values_to_node_dict, write_out_csv, \
load_node, get_field_header, dump_args, log_it, \
get_cur_datetime
filename=os.path.basename(__file__)
log = log_it(filename)
# the Higher-Ups
node_type = 'visit'
parent_type = 'subject'
grand_parent_type = 'study'
great_parent_type = 'project'
node_tracking_file = settings.node_id_tracking.path
class node_values:
visit_id = ''
visit_number = ''
interval = ''
clinic_id = ''
tags = ['rand_subject_id: ','sub_study: ','study: ']
def load(internal_id, search_field):
"""search for existing node to update, else create new"""
# node-specific variables:
NodeTypeName = 'Visit'
NodeLoadFunc = 'load'
return load_node(internal_id, search_field, NodeTypeName, NodeLoadFunc)
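# Illustrative call (hypothetical visit id), mirroring the usage in submit():
#   node = load('V-0123', 'visit_id')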
def validate_record(parent_id, node, record, data_file_name=node_type):
"""update record fields
validate node
if valid, save, if not, return false
"""
log.info("in validate/save: "+node_type)
csv_fieldnames = get_field_header(data_file_name)
write_csv_headers(data_file_name,fieldnames=csv_fieldnames)
node.visit_id = record['visit_id']
node.visit_number = int(record['visit_number'])
node.interval = int(record['interval'])
node.tags = list_tags(
'rand_subject_id: '+record['rand_subject_id'],
'study: prediabetes',
)
log.debug('parent_id: '+str(parent_id))
node.links = {'by':[parent_id]}
csv_fieldnames = get_field_header(data_file_name)
if not node.is_valid():
write_out_csv(data_file_name+'_invalid_records.csv',
fieldnames=csv_fieldnames,values=[record,])
invalidities = node.validate()
err_str = "Invalid {}!\n\t{}".format(node_type, str(invalidities))
log.error(err_str)
# raise Exception(err_str)
elif node.save():
write_out_csv(data_file_name+'_submitted.csv',
fieldnames=csv_fieldnames,values=[record,])
return node
else:
write_out_csv(data_file_name+'_unsaved_records.csv',
fieldnames=csv_fieldnames,values=[record,])
return False
def submit(data_file, id_tracking_file=node_tracking_file):
log.info('Starting submission of %ss.', node_type)
nodes = []
csv_fieldnames = get_field_header(data_file)
write_csv_headers(data_file,fieldnames=csv_fieldnames)
for record in load_data(data_file):
# if record['consented'] == 'YES' \
# and record['visit_number'] != 'UNK':
if record['visit_number'] != 'UNK':
# use of 'UNK' = hack workaround for unreconciled visit list
log.info('\n...next record...')
try:
log.debug('data record: '+str(record))
# node-specific variables:
load_search_field = 'visit_id'
internal_id = record[load_search_field]
parent_internal_id = record['rand_subject_id']
grand_parent_internal_id = 'prediabetes'
parent_id = get_parent_node_id(
id_tracking_file, parent_type, parent_internal_id)
# grand_parent_id = get_parent_node_id(
# id_tracking_file, grand_parent_type, grand_parent_internal_id)
if parent_id:
node_is_new = False # set to True if newbie
node = load(internal_id, load_search_field)
if not getattr(node, load_search_field):
log.debug('loaded node newbie...')
node_is_new = True
saved = validate_record(parent_id, node, record,
data_file_name=data_file)
if saved:
header = settings.node_id_tracking.id_fields
saved_name = getattr(saved, load_search_field)
vals = values_to_node_dict(
[[node_type.lower(), saved_name, saved.id,
parent_type.lower(), parent_internal_id, parent_id,
get_cur_datetime()]],
header
)
nodes.append(vals)
if node_is_new:
write_out_csv(id_tracking_file,
fieldnames=get_field_header(id_tracking_file),
values=vals)
else:
log.error('No parent_id found for %s', parent_internal_id)
except Exception, e:
log.exception(e)
raise e
else:
write_out_csv(data_file+'_records_no_submit.csv',
fieldnames=record.keys(),values=[record,])
return nodes
if __name__ == '__main__':
pass
|
TheJacksonLaboratory/osdf_submit
|
nodes/visit.py
|
Python
|
gpl-3.0
| 5,197
|
[
"VisIt"
] |
39bde6460ed09f8af8f8a35391299b3ba5270fe129534caa42e352de9b1dac2b
|
import operator
import sys
from collections import defaultdict
from colorsys import rgb_to_hsv
from pathlib import Path
import numpy as np
from PIL import Image
class ColorModel:
version = 20210206
approx_ram_mb = 120
max_num_workers = 2
def __init__(self):
self.colors = {
# Name: ((red, green, blue), ordering)
'Red': ((120, 4, 20), 1),
'Orange': ((245, 133, 0), 2),
'Amber': ((234, 166, 30), 3),
'Yellow': ((240, 240, 39), 4),
'Lime': ((168, 228, 26), 5),
'Green': ((7, 215, 7), 6),
'Teal': ((16, 202, 155), 7),
'Turquoise': ((25, 225, 225), 8),
'Aqua': ((10, 188, 245), 9),
'Azure': ((30, 83, 249), 10),
'Blue': ((0, 0, 255), 11),
'Purple': ((127, 0, 255), 12),
'Orchid': ((190, 0, 255), 13),
'Magenta': ((233, 8, 200), 14),
'White': ((255, 255, 255), 15),
'Gray': ((124, 124, 124), 16),
'Black': ((0, 0, 0), 17),
}
def predict(self, image_file, image_size=32, min_score=0.005):
image = Image.open(image_file)
image = image.resize((image_size, image_size), Image.BICUBIC)
pixels = np.asarray(image)
pixels = [j for i in pixels for j in i]
summed_results = defaultdict(int)
for i, pixel in enumerate(pixels):
best_color = None
best_score = 0
for name, (target, _) in self.colors.items():
score = self.color_distance(pixel, target)
if score > best_score:
best_color = name
best_score = score
if best_color:
summed_results[best_color] += 1
averaged_results = {}
for key, val in summed_results.items():
val = val / (image_size * image_size)
if val >= min_score:
averaged_results[key] = val
sorted_results = sorted(averaged_results.items(), key=operator.itemgetter(1), reverse=True)
return sorted_results
def color_distance(self, a, b):
        # Colors are sequences of three RGB components in the 0-255 range;
        # they are normalized and compared in HSV space below
a_h, a_s, a_v = rgb_to_hsv(a[0] / 255, a[1] / 255, a[2] / 255)
b_h, b_s, b_v = rgb_to_hsv(b[0] / 255, b[1] / 255, b[2] / 255)
diff_h = 1 - abs(a_h - b_h) # Hue is more highly weighted than saturation and value
diff_s = 1 - abs(a_s - b_s) * 0.5
diff_v = 1 - abs(a_v - b_v) * 0.25
score = diff_h * diff_s * diff_v
return score
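    # Illustrative check (example pixel values); note that despite the name,
    # a HIGHER score means a CLOSER match, cf. predict() above:
    #   m = ColorModel()
    #   m.color_distance((0, 255, 0), m.colors['Green'][0])  # ~0.95
    #   m.color_distance((0, 255, 0), m.colors['Blue'][0])   # ~0.67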
def run_on_photo(photo_id):
model = ColorModel()
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
from photonix.classifiers.runners import results_for_model_on_photo, get_or_create_tag
photo, results = results_for_model_on_photo(model, photo_id)
if photo:
from photonix.photos.models import PhotoTag
photo.clear_tags(source='C', type='C')
for name, score in results:
tag = get_or_create_tag(library=photo.library, name=name, type='C', source='C', ordering=model.colors[name][1])
PhotoTag(photo=photo, tag=tag, source='C', confidence=score, significance=score).save()
return photo, results
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Argument required: image file path')
exit(1)
_, results = run_on_photo(sys.argv[1])
for result in results:
print('{} (score: {:0.10f})'.format(result[0], result[1]))
|
damianmoore/photo-manager
|
photonix/classifiers/color/model.py
|
Python
|
agpl-3.0
| 3,863
|
[
"Amber"
] |
cbefd32f10b8c7dbc0e2d2d31ae4a04932bdb70cbbf0096d915b168efa00ebb9
|
#!/usr/bin/env python
__author__ = "waroquiers"
import os
import random
import shutil
import unittest
import numpy as np
from monty.tempfile import ScratchDir
from pymatgen.analysis.chemenv.coordination_environments.voronoi import (
DetailedVoronoiContainer,
)
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
json_files_dir = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"..",
"..",
"test_files",
"chemenv",
"json_test_files",
)
img_files_dir = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"..",
"..",
"test_files",
"chemenv",
"images",
)
class VoronoiContainerTest(PymatgenTest):
def test_voronoi(self):
with ScratchDir("."):
# Define a cubic lattice and a list of species (to be used for the fake structures)
cubic_lattice = Lattice.cubic(10.0)
species = ["Cu", "O", "O", "O", "O", "O", "O"]
valences = "undefined"
# First fake structure
coords = [[5.0, 5.0, 5.0]]
order_and_coords = [
(1, [4.0, 5.0, 5.0]),
(2, [6.01, 5.0, 5.0]),
(3, [5.0, 3.98, 5.0]),
(4, [5.0, 6.03, 5.0]),
(5, [5.0, 5.0, 3.96]),
(6, [5.0, 5.0, 6.05]),
]
random.shuffle(order_and_coords)
            sorted_indices = np.argsort([oc[0] for oc in order_and_coords]) + 1
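            # After shuffling, recover the distance ordering:
            # sorted_indices[k] is the index in the structure of the
            # (k+1)-th nearest neighbour (index 0 is the central Cu site).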
coords.extend([oc[1] for oc in order_and_coords])
fake_structure = Structure(cubic_lattice, species, coords, coords_are_cartesian=True)
# First fake structure with a given normalized_distance_tolerance of 0.0100001
detailed_voronoi_container = DetailedVoronoiContainer(
structure=fake_structure,
valences=valences,
normalized_distance_tolerance=0.0100001,
isites=[0],
)
self.assertEqual(len(detailed_voronoi_container.voronoi_list2[0]), 6)
neighbors = detailed_voronoi_container.neighbors(0, 1.0, 0.5, True)
self.assertEqual(len(neighbors), 6)
neighbors = detailed_voronoi_container.neighbors(0, 1.02, 0.5, True)
self.assertEqual(len(neighbors), 6)
neighbors = detailed_voronoi_container.neighbors(0, 1.026, 0.5, True)
self.assertEqual(len(neighbors), 6)
neighbors = detailed_voronoi_container.neighbors(0, 1.5, 0.5, True)
self.assertEqual(len(neighbors), 6)
# First fake structure with a given normalized_distance_tolerance of 0.001
detailed_voronoi_container = DetailedVoronoiContainer(
structure=fake_structure,
valences=valences,
normalized_distance_tolerance=0.001,
isites=[0],
)
self.assertEqual(len(detailed_voronoi_container.voronoi_list2[0]), 6)
neighbors = detailed_voronoi_container.neighbors(0, 1.0, 0.5, True)
self.assertEqual(len(neighbors), 1)
            self.assertEqual(neighbors[0]["site"], fake_structure[sorted_indices[0]])
neighbors = detailed_voronoi_container.neighbors(0, 1.02, 0.5, True)
nbs = [nb["site"] for nb in neighbors]
self.assertEqual(len(neighbors), 3)
            self.assertIn(fake_structure[sorted_indices[0]], nbs)
            self.assertIn(fake_structure[sorted_indices[1]], nbs)
            self.assertIn(fake_structure[sorted_indices[2]], nbs)
neighbors = detailed_voronoi_container.neighbors(0, 1.026, 0.5, True)
nbs = [nb["site"] for nb in neighbors]
self.assertEqual(len(neighbors), 3)
            self.assertIn(fake_structure[sorted_indices[0]], nbs)
            self.assertIn(fake_structure[sorted_indices[1]], nbs)
            self.assertIn(fake_structure[sorted_indices[2]], nbs)
neighbors = detailed_voronoi_container.neighbors(0, 1.5, 0.5, True)
self.assertEqual(len(neighbors), 6)
# Second fake structure
coords2 = [[5.0, 5.0, 5.0]]
order_and_coords = [
(1, [4.0, 5.0, 5.0]),
(2, [6.01, 5.0, 5.0]),
(3, [5.0, 3.98, 5.0]),
(4, [5.0, 6.07, 5.0]),
(5, [5.0, 5.0, 3.92]),
(6, [5.0, 5.0, 6.09]),
]
random.shuffle(order_and_coords)
            sorted_indices = np.argsort([oc[0] for oc in order_and_coords]) + 1
coords2.extend([oc[1] for oc in order_and_coords])
fake_structure2 = Structure(cubic_lattice, species, coords2, coords_are_cartesian=True)
# Second fake structure with a given normalized_distance_tolerance of 0.0100001
detailed_voronoi_container = DetailedVoronoiContainer(
structure=fake_structure2,
valences=valences,
normalized_distance_tolerance=0.0100001,
isites=[0],
)
self.assertEqual(len(detailed_voronoi_container.voronoi_list2[0]), 6)
neighbors = detailed_voronoi_container.neighbors(0, 1.0, 0.5, True)
nbs = [nb["site"] for nb in neighbors]
self.assertEqual(len(neighbors), 3)
            self.assertIn(fake_structure2[sorted_indices[0]], nbs)
            self.assertIn(fake_structure2[sorted_indices[1]], nbs)
            self.assertIn(fake_structure2[sorted_indices[2]], nbs)
neighbors = detailed_voronoi_container.neighbors(0, 1.02, 0.5, True)
nbs = [nb["site"] for nb in neighbors]
self.assertEqual(len(neighbors), 3)
            self.assertIn(fake_structure2[sorted_indices[0]], nbs)
            self.assertIn(fake_structure2[sorted_indices[1]], nbs)
            self.assertIn(fake_structure2[sorted_indices[2]], nbs)
neighbors = detailed_voronoi_container.neighbors(0, 1.026, 0.5, True)
nbs = [nb["site"] for nb in neighbors]
self.assertEqual(len(neighbors), 3)
            self.assertIn(fake_structure2[sorted_indices[0]], nbs)
            self.assertIn(fake_structure2[sorted_indices[1]], nbs)
            self.assertIn(fake_structure2[sorted_indices[2]], nbs)
neighbors = detailed_voronoi_container.neighbors(0, 1.5, 0.5, True)
self.assertEqual(len(neighbors), 6)
species = ["Cu", "Cu", "O", "O", "O", "Cu", "O"]
valences = [2, 2, -2, -2, -2, 2, -2]
# Third fake structure (test of the only_anion_cation_bonds)
coords = [
[5.0, 5.0, 5.0],
[6.01, 5.0, 5.0],
[5.0, 5.0, 3.96],
[4.0, 5.0, 5.0],
[5.0, 6.03, 5.0],
[5.0, 3.98, 5.0],
[5.0, 5.0, 6.05],
]
fake_structure3 = Structure(cubic_lattice, species, coords, coords_are_cartesian=True)
detailed_voronoi_container = DetailedVoronoiContainer(
structure=fake_structure3,
valences=valences,
normalized_distance_tolerance=0.0100001,
isites=[0],
additional_conditions=[DetailedVoronoiContainer.AC.ONLY_ACB],
)
self.assertEqual(len(detailed_voronoi_container.voronoi_list2[0]), 6)
neighbors = detailed_voronoi_container.neighbors(0, 1.01, 0.5, True)
nbs = [nb["site"] for nb in neighbors]
self.assertEqual(len(neighbors), 6)
            self.assertIn(fake_structure3[1], nbs)
            self.assertIn(fake_structure3[2], nbs)
            self.assertIn(fake_structure3[3], nbs)
            self.assertIn(fake_structure3[4], nbs)
            self.assertIn(fake_structure3[5], nbs)
            self.assertIn(fake_structure3[6], nbs)
# Test of the as_dict() and from_dict() methods as well as __eq__ method
other_detailed_voronoi_container = DetailedVoronoiContainer.from_dict(detailed_voronoi_container.as_dict())
            self.assertEqual(detailed_voronoi_container, other_detailed_voronoi_container)
def test_get_vertices_dist_ang_indices(self):
with ScratchDir("."):
cubic_lattice = Lattice.cubic(10.0)
species = ["Cu", "O", "O", "O", "O", "O", "O"]
valences = "undefined"
# First fake structure
coords = [
[5.0, 5.0, 5.0],
[6.01, 5.0, 5.0],
[5.0, 5.0, 3.96],
[4.0, 5.0, 5.0],
[5.0, 6.03, 5.0],
[5.0, 3.98, 5.0],
[5.0, 5.0, 6.05],
]
fake_structure = Structure(cubic_lattice, species, coords, coords_are_cartesian=True)
# First fake structure with a given normalized_distance_tolerance of 0.0100001
detailed_voronoi_container = DetailedVoronoiContainer(
structure=fake_structure,
valences=valences,
normalized_distance_tolerance=0.0100001,
isites=[0],
)
fake_parameter_indices_list = []
for ii in range(2, 5):
for jj in range(7, 14):
fake_parameter_indices_list.append((ii, jj))
for ii in range(5, 7):
for jj in range(10, 14):
fake_parameter_indices_list.append((ii, jj))
points = detailed_voronoi_container._get_vertices_dist_ang_indices(fake_parameter_indices_list)
self.assertEqual(points[0], (2, 7))
self.assertEqual(points[1], (4, 7))
self.assertEqual(points[2], (4, 10))
self.assertEqual(points[3], (6, 10))
self.assertEqual(points[4], (6, 13))
self.assertEqual(points[5], (2, 13))
if __name__ == "__main__":
unittest.main()
|
gmatteo/pymatgen
|
pymatgen/analysis/chemenv/coordination_environments/tests/test_voronoi.py
|
Python
|
mit
| 9,915
|
[
"pymatgen"
] |
694ac5df21118c8836187ea9bd6176296261052e9288671d8a9f2a9b7bfa3bfe
|
from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
from numpy.compat.py3k import basestring
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def flip(m, axis):
"""
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
.. versionadded:: 1.12.0
Parameters
----------
m : array_like
Input array.
    axis : integer
        Axis in array along which the entries are reversed.
Returns
-------
out : array_like
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
See Also
--------
flipud : Flip an array vertically (axis=0).
fliplr : Flip an array horizontally (axis=1).
Notes
-----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> A = np.random.randn(3,4,5)
>>> np.all(flip(A,2) == A[:,:,::-1,...])
True
"""
if not hasattr(m, 'ndim'):
m = asarray(m)
indexer = [slice(None)] * m.ndim
try:
indexer[axis] = slice(None, None, -1)
except IndexError:
raise ValueError("axis=%i is invalid for the %i-dimensional input array"
% (axis, m.ndim))
return m[tuple(indexer)]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : bool
Return ``True`` if the object has an iterator method or is a
sequence and ``False`` otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
True
>>> np.iterable(2)
False
"""
try:
iter(y)
except TypeError:
return False
return True
def _hist_bin_sqrt(x):
"""
Square root histogram bin estimator.
    Bin width is inversely proportional to the square root of the data
    size. Used by many programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / np.sqrt(x.size)
def _hist_bin_sturges(x):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
def _hist_bin_doane(x):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
http://stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return x.ptp() / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(x):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x`. The Sturges estimator
is quite good for small (<1000) datasets and is the default in the R
    language. This method gives good off-the-shelf behaviour.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
See Also
--------
_hist_bin_fd, _hist_bin_sturges
"""
    # No explicit zero check is needed here: if the FD width is zero (as
    # happens when the IQR is zero), the caller falls back to a single bin.
return min(_hist_bin_fd(x), _hist_bin_sturges(x))
# Private dict initialized at module load time
_hist_bin_selectors = {'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
'scott': _hist_bin_scott,
'sqrt': _hist_bin_sqrt,
'sturges': _hist_bin_sturges}
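# Hedged illustration, not part of the original source: '_demo_bin_width' is a
# hypothetical helper showing that each selector maps a data array to a bin
# *width*; histogram() then converts that width into a bin count for the range.
def _demo_bin_width(data, method='sturges'):
    data = np.asarray(data, dtype=float)
    return _hist_bin_selectors[method](data)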
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
r"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string from the list below, `histogram` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all round performance
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
            account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
            Less robust estimator that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the ``density``
keyword instead. If ``False``, the result will contain the
number of samples in each bin. If ``True``, the result is the
value of the probability *density* function at the bin,
normalized such that the *integral* over the range is 1. Note
that this latter behavior is known to be buggy with unequal bin
widths; use ``density`` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words,
if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
.. versionadded:: 1.11.0
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
    ``np.round(np.ceil(range / h))``.
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'Rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'Sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'Doane'
.. math:: n_h = 1 + \log_{2}(n) +
                        \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'Sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data
with 2000 points:
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
    >>> plt.hist(a, bins='auto')  # plt.hist passes its arguments to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
# Do not modify the original value of range so we can check for `None`
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
mn, mx = 0.0, 1.0
else:
mn, mx = a.min() + 0.0, a.max() + 0.0
else:
mn, mx = [mi + 0.0 for mi in range]
if mn > mx:
raise ValueError(
'max must be larger than min in range parameter.')
if not np.all(np.isfinite([mn, mx])):
raise ValueError(
'range parameter must be finite.')
if mn == mx:
mn -= 0.5
mx += 0.5
if isinstance(bins, basestring):
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bins not in _hist_bin_selectors:
raise ValueError("{0} not a valid estimator for bins".format(bins))
if weights is not None:
raise TypeError("Automated estimation of the number of "
"bins is not supported for weighted data")
# Make a reference to `a`
b = a
# Update the reference if the range needs truncation
if range is not None:
keep = (a >= mn)
keep &= (a <= mx)
if not np.logical_and.reduce(keep):
b = a[keep]
if b.size == 0:
bins = 1
else:
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bins](b)
if width:
bins = int(np.ceil((mx - mn) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
bins = 1
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, np.complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
# Initialize empty histogram
n = np.zeros(bins, ntype)
# Pre-compute histogram scaling factor
norm = bins / (mx - mn)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= mn)
keep &= (tmp_a <= mx)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
tmp_a = tmp_a.astype(float)
tmp_a -= mn
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on mx we
# need to subtract one
indices = tmp_a.astype(np.intp)
indices[indices == bins] -= 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
else:
n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
# We now compute the bin edges since these are returned
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise ValueError(
'bins must increase monotonically.')
# Initialize empty histogram
n = np.zeros(bins.shape, ntype)
if weights is None:
for i in arange(0, len(a), BLOCK):
sa = sort(a[i:i+BLOCK])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
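            # Weighted path: sort each block, take the cumulative sum of the
            # sorted weights, and sample it at the bin boundaries found by
            # searchsorted; np.diff of those boundary samples (done after the
            # loop) yields the per-bin weight totals.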
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
                'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
if not np.all(np.isfinite(range)):
raise ValueError(
'range parameter must be finite.')
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
a = np.asanyarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
wgt = np.asanyarray(weights)
if issubclass(a.dtype.type, (np.integer, np.bool_)):
result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
else:
result_dtype = np.result_type(a.dtype, wgt.dtype)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
wgt = wgt.swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=result_dtype)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
if returned:
scl = np.broadcast_to(scl, avg.shape)
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print('ValueError')
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
# Only able to stack vertically if the array is 1d or less
if x.ndim <= 1:
condlist = np.vstack([condlist, ~totlist])
else:
condlist = [asarray(c, dtype=bool) for c in condlist]
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
        as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sides (forward or backwards) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : scalar or list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
        A single scalar specifies the sample distance for all dimensions.
        If `axis` is given, the number of varargs must equal the number of axes.
edge_order : {1, 2}, optional
Gradient is calculated using N-th order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
axis : None or int or tuple of ints, optional
Gradient is calculated only along the given axis or axes
The default (axis = None) is to calculate the gradient for all the axes of the input array.
axis may be negative, in which case it counts from the last to the first axis.
.. versionadded:: 1.11.0
Returns
-------
gradient : list of ndarray
Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
The axis keyword can be used to specify a subset of axes of which the gradient is calculated
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
axes = kwargs.pop('axis', None)
if axes is None:
axes = tuple(range(N))
# check axes to have correct type and no duplicate entries
if isinstance(axes, int):
axes = (axes,)
if not isinstance(axes, tuple):
raise TypeError("A tuple of integers or a single integer is required")
# normalize axis values:
axes = tuple(x + N if x < 0 else x for x in axes)
if max(axes) >= N or min(axes) < 0:
raise ValueError("'axis' entry is out of bounds")
if len(set(axes)) != len(axes):
raise ValueError("duplicate value in 'axis'")
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == len(axes):
dx = list(varargs)
else:
        raise TypeError(
            "invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for i, axis in enumerate(axes):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
            # Use first order one-sided differences at the boundaries
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
        # Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[i]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len(axes) == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th discrete difference along given axis.
The first difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The n-th differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
if period is None:
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=np.float64)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
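        # Pad one wrapped point on each side so that interpolation across the
        # period boundary sees a neighbour from the other end of the cycle.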
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
if return_array:
return compiled_interp(x, xp, fp, left, right)
else:
return compiled_interp(x, xp, fp, left, right).item()
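# A minimal sketch (not part of this module's API) of the periodic branch
# above: sample points are reduced modulo `period`, sorted, and padded at
# both ends before interpolating. Values taken from the docstring example;
# assumes NumPy is installed, and the helper name is invented for
# demonstration.
def _demo_interp_period():
    import numpy as np
    xp = [190, -190, 350, -350]
    fp = [5, 10, 3, 4]
    # x = 0 falls between the padded point 350 - 360 = -10 and 10
    y = np.interp([0, 180], xp, fp, period=360)
    assert np.allclose(y, [3.5, 7.5])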
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
    slice1 = [slice(None, None)]*nd     # full slices
    slice1[axis] = slice(1, None)
    slice1 = tuple(slice1)  # index with a tuple, as diff() does above
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
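# A minimal round-trip sketch (not part of this module's API): wrap a
# growing phase into (-pi, pi] with angle() and recover it with unwrap().
# Assumes NumPy is installed; the helper name is invented for demonstration.
def _demo_unwrap_roundtrip():
    import numpy as np
    t = np.linspace(0, 4 * np.pi, 100)
    wrapped = np.angle(np.exp(1j * t))   # folded into (-pi, pi]
    restored = np.unwrap(wrapped)        # jumps > pi replaced by complements
    assert np.allclose(restored, t)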
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : ndarray
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N, it will be repeated, and if elements of `arr` are to be
        masked, this sequence must be non-empty.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
if not isinstance(arr, np.ndarray):
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(arr).__name__))
return _insert(arr, mask, vals)
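# A minimal sketch (not part of this module's API) of the cycling behaviour
# described above: `vals` is repeated to fill the three True positions.
# Taken from the docstring example; assumes NumPy is installed, and the
# helper name is invented for demonstration.
def _demo_place_cycles_vals():
    import numpy as np
    a = np.arange(6).reshape(2, 3)
    np.place(a, a > 2, [44, 55])    # three True slots, two values: 44, 55, 44
    assert a.tolist() == [[0, 1, 2], [44, 55, 44]]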
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
    >>> from io import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
    The data type of the output of the vectorized function is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
    >>> vfunc = np.vectorize(myfunc, otypes=[float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
    Support for the new keyword-argument interface and the `excluded`
    argument further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
        Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
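# A minimal sketch (not part of this module's API) of the `cache` option
# documented above: the probe call used to determine the output type is
# cached, so the wrapped function runs exactly once per element. Assumes
# NumPy is installed; the helper name is invented for demonstration.
def _demo_vectorize_cache():
    import numpy as np
    calls = []
    def f(x):
        calls.append(x)
        return x + 1
    vf = np.vectorize(f, cache=True)
    vf([1, 2, 3])
    assert len(calls) == 3   # without cache=True this would be 4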
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N - 1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x, y))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x))
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions")
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
if y.ndim > 2:
raise ValueError("y has more than 2 dimensions")
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
        fweights = np.asarray(fweights, dtype=float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
        aweights = np.asarray(aweights, dtype=float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = X.shape[1] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
c = dot(X, X_T.conj())
c *= 1. / np.float64(fact)
return c.squeeze()
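# A minimal sketch (not part of this module's API) checking np.cov against
# the weighted algorithm spelled out in the Notes above, with the default
# ddof of 1. Assumes NumPy is installed; the helper name is invented for
# demonstration.
def _demo_cov_weighted():
    import numpy as np
    m = np.array([[0., 1., 2.], [2., 1., 0.]])
    f = np.array([1., 2., 1.])       # integer-valued frequency weights
    a = np.array([0.5, 1.0, 0.5])    # observation (reliability) weights
    w = f * a
    v1 = w.sum()
    v2 = (w * a).sum()
    mm = m - (m * w).sum(axis=1, keepdims=True) / v1
    manual = np.dot(mm * w, mm.T) * v1 / (v1**2 - 1 * v2)
    assert np.allclose(np.cov(m, fweights=f, aweights=a), manual)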
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
    Due to floating point rounding the resulting array may not be Hermitian,
    the diagonal elements may not be 1, and the elements may not satisfy the
    inequality abs(a) <= 1. The real and imaginary parts are clipped to the
    interval [-1, 1] in an attempt to improve on that situation, but this is
    not much help in the complex case.
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError:
# scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
stddev = sqrt(d.real)
c /= stddev[:, None]
c /= stddev[None, :]
# Clip real and imaginary parts to [-1, 1]. This does not guarantee
# abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
# excessive work.
np.clip(c.real, -1, 1, out=c.real)
if np.iscomplexobj(c):
np.clip(c.imag, -1, 1, out=c.imag)
return c
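# A minimal sketch (not part of this module's API) of the relation in the
# docstring above: R_ij = C_ij / sqrt(C_ii * C_jj). Assumes NumPy is
# installed; the helper name is invented for demonstration.
def _demo_corrcoef_from_cov():
    import numpy as np
    x = np.array([[0., 1., 2., 4.], [3., 1., 2., 0.]])
    c = np.cov(x)
    d = np.sqrt(np.diag(c))
    assert np.allclose(np.corrcoef(x), c / np.outer(d, d))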
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
    as the Kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
    discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the
    product of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
    Plot the window and its frequency response (requires matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hamming was named for R. W. Hamming, an associate of J. W. Tukey,
    and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
    # Evaluate the Chebyshev series `vals` at `x` using Clenshaw's
    # recurrence, mirroring the cephes `chbevl` routine that the
    # coefficient tables above were taken from.
    b0 = vals[0]
    b1 = 0.0
    for i in range(1, len(vals)):
        b2 = b1
        b1 = b0
        b0 = x*b1 - b2 + vals[i]
    return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
    A beta value of 14 is probably a good starting point. Note that as beta
    gets large, the window narrows, and so the number of samples needs to be
    large enough to sample the increasingly narrow spike, otherwise NaNs will
    be returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
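# A minimal sketch (not part of this module's API) of the beta table above:
# beta = 8.6 produces a window close to the Blackman window. The 0.1 bound
# is a loose editorial choice, not a documented guarantee. Assumes NumPy is
# installed; the helper name is invented for demonstration.
def _demo_kaiser_approximates_blackman():
    import numpy as np
    M = 51
    err = np.max(np.abs(np.kaiser(M, 8.6) - np.blackman(M)))
    assert err < 0.1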
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
    x = np.asanyarray(x)
    # substitute a tiny value for x == 0 so that sin(y)/y evaluates to the
    # limit value 1.0 without a division-by-zero warning
    y = pi * where(x == 0, 1.0e-20, x)
    return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
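# A minimal sketch (not part of this module's API) of the equivalence noted
# above: msort(a) matches sort(a, axis=0). Assumes NumPy is installed; the
# helper name is invented for demonstration.
def _demo_msort_equivalence():
    import numpy as np
    a = np.array([[3, 1], [0, 2]])
    assert np.array_equal(np.msort(a), np.sort(a, axis=0))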
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
Reduction function capable of receiving a single axis argument.
        It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
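# A minimal sketch (not part of this module's API) of what _ureduce enables:
# reducing over a tuple of axes while reporting the keepdims shape, here
# exercised through median(). Assumes NumPy is installed; the helper name is
# invented for demonstration.
def _demo_ureduce_keepdims():
    import numpy as np
    a = np.arange(24.).reshape(2, 3, 4)
    r = np.median(a, axis=(0, 2), keepdims=True)
    assert r.shape == (1, 3, 1)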
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`median`. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. If `overwrite_input` is ``True`` and `a` is not already an
`ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result. If the input contains integers
or floats smaller than ``float64``, then the output data-type is
``np.float64``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, percentile
Notes
-----
    Given a vector ``V`` of length ``N``, the median of ``V`` is the
    middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
    ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
    two middle values of ``V_sorted`` when ``N`` is even.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
    if part.shape[axis] % 2 == 1:
        # index with slice to allow mean (below) to work
        indexer[axis] = slice(index, index+1)
    else:
        indexer[axis] = slice(index-1, index+1)
    indexer = tuple(indexer)  # ndarray indexing expects a tuple, not a list
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact) and sz > 0:
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
            if n:
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile(s) of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute, which must be between 0 and 100 inclusive.
axis : {int, sequence of int, None}, optional
Axis or axes along which the percentiles are computed. The
default is to compute the percentile(s) along a flattened
version of the array. A sequence of axes is supported since
version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a`
calculations. The input array will be modified by the call to
`percentile`. This will save memory when you do not need to
preserve the contents of the input array. In this case you
should not make any assumptions about the contents of the input
`a` after this function completes -- treat it as undefined.
Default is False. If `a` is not already an array, this parameter
will have no effect as `a` will be converted to an array
internally regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction``
is the fractional part of the index surrounded by ``i``
and ``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
result will broadcast correctly against the original array `a`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If `q` is a single percentile and `axis=None`, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the percentiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
data-type is ``float64``. Otherwise, the output data-type is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, median, nanpercentile
Notes
-----
    Given a vector ``V`` of length ``N``, the ``q``-th percentile of
    ``V`` is the value ``q/100`` of the way from the minimum to the
    maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
will determine the percentile if the normalized ranking does not
match the location of ``q`` exactly. This function is the same as
the median if ``q=50``, the same as the minimum if ``q=0`` and the
same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
>>> np.percentile(a, 50, axis=1, keepdims=True)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=out)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a == b)
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
    # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = 0.5 * (floor(indices) + ceil(indices))
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
        raise ValueError(
            "interpolation can only be 'linear', 'lower', 'higher', "
            "'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
weights_below = np.rollaxis(weights_below, axis, 0)
weights_above = np.rollaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in percentile",
RuntimeWarning)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
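# A minimal sketch (not part of this module) of the 'linear' branch above:
# the percentile maps to the fractional index q*(Nx - 1) into the sorted
# data, and the result blends the two neighbouring order statistics by the
# fractional part.
#
#     >>> ap = np.sort(np.array([1., 1., 3., 4., 5.]))
#     >>> q = 0.4                       # the 40th percentile
#     >>> idx = q * (ap.size - 1)       # 1.6
#     >>> lo, hi = int(np.floor(idx)), int(np.ceil(idx))
#     >>> w = idx - lo                  # weight for the upper neighbour
#     >>> val = ap[lo] * (1 - w) + ap[hi] * w
#     >>> np.allclose(val, np.percentile(ap, 40))
#     True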
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
The sample points corresponding to the `y` values. If `x` is None,
the sample points are assumed to be evenly spaced `dx` apart. The
default is None.
dx : scalar, optional
The spacing between sample points when `x` is None. The default is 1.
axis : int, optional
The axis along which to integrate.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
    Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
    will be taken from the `y` array; by default the x-axis distances between
    points will be 1.0, but they can alternatively be provided with the `x`
    array or with the `dx` scalar. The return value will equal the combined
    area under the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
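# A minimal sketch (not part of this module) of the composite rule computed
# above: with spacings d = diff(x), each interval contributes
# d[i]*(y[i] + y[i+1])/2, which the slice pair selects along `axis`.
#
#     >>> y = np.array([1., 2., 3.])
#     >>> x = np.array([4., 6., 8.])
#     >>> d = np.diff(x)                        # array([ 2.,  2.])
#     >>> (d * (y[1:] + y[:-1]) / 2.0).sum()
#     8.0
#     >>> np.trapz(y, x=x)                      # matches
#     8.0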
# always succeed
def add_newdoc(place, obj, doc):
"""
Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
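# A minimal sketch (not part of this module) contrasting the two return
# paths above: a sparse grid keeps each coordinate broadcastable, while the
# dense copy=False path materialises full matrices via broadcast_arrays.
#
#     >>> x = np.array([0., 1., 2.])
#     >>> y = np.array([10., 20.])
#     >>> xs, ys = np.meshgrid(x, y, sparse=True)
#     >>> xs.shape, ys.shape
#     ((1, 3), (2, 1))
#     >>> xd, yd = np.broadcast_arrays(xs, ys)  # dense equivalent
#     >>> xd.shape, yd.shape
#     ((2, 3), (2, 3))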
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
    This is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy(order=arrorder)
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy(order=arrorder))
else:
return arr.copy(order=arrorder)
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arrorder)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
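# A minimal sketch (not part of this module) of the slice inversion above:
# a negative-step slice is rewritten as an equivalent ascending one so the
# chunked copy only ever deals with increasing indices.
#
#     >>> N = 6
#     >>> start, stop, step = slice(None, None, -2).indices(N)
#     >>> xr = range(start, stop, step)         # 5, 3, 1
#     >>> step, start, stop = -step, xr[-1], xr[0] + 1
#     >>> list(range(start, stop, step))        # same indices, ascending
#     [1, 3, 5]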
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
    Note that for higher dimensional inserts `obj=0` behaves very differently
    from `obj=[0]`, just like `arr[:,0,:] = values` is different from
    `arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy(order=arrorder)
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very different from a[:,[0],:] = ...! This changes values so that
        # it works like the second case (here a[:,0:1,:]).
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arrorder)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
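# A minimal sketch (not part of this module) of the multi-insert index
# adjustment above: after a stable sort, each insertion point is shifted by
# the number of values inserted before it, which is why
# np.insert(b, [2, 2], [5, 6]) keeps both new values in argument order.
#
#     >>> indices = np.array([2, 2])
#     >>> order = indices.argsort(kind='mergesort')  # stable: array([0, 1])
#     >>> indices[order] += np.arange(len(indices))
#     >>> indices
#     array([2, 3])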
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| rherault-insa/numpy | numpy/lib/function_base.py | Python | bsd-3-clause | 152,157 | ["Gaussian"] | b1da60468bcaf341a187154e9c588efb33f63676e72999e1095f5369ca683923 |
import operator
from ..node import NodeVisitor, DataNode, ConditionalNode, KeyValueNode, ListNode, ValueNode
from ..parser import parse
class ConditionalValue(object):
def __init__(self, node, condition_func):
self.node = node
self.condition_func = condition_func
if isinstance(node, ConditionalNode):
assert len(node.children) == 2
self.condition_node = self.node.children[0]
self.value_node = self.node.children[1]
else:
assert isinstance(node, (ValueNode, ListNode))
self.condition_node = None
self.value_node = self.node
@property
def value(self):
if isinstance(self.value_node, ValueNode):
return self.value_node.data
else:
return [item.data for item in self.value_node.children]
@value.setter
def value(self, value):
self.value_node.data = value
def __call__(self, run_info):
return self.condition_func(run_info)
def set_value(self, value):
self.value = value
def remove(self):
if len(self.node.parent.children) == 1:
self.node.parent.remove()
self.node.remove()
class Compiler(NodeVisitor):
def compile(self, tree, data_cls_getter=None, **kwargs):
"""Compile a raw AST into a form where conditional expressions
are represented by ConditionalValue objects that can be evaluated
at runtime.
tree - The root node of the wptmanifest AST to compile
data_cls_getter - A function taking two parameters; the previous
output node and the current ast node and returning
the class of the output node to use for the current
ast node
"""
if data_cls_getter is None:
self.data_cls_getter = lambda x, y: ManifestItem
else:
self.data_cls_getter = data_cls_getter
self.tree = tree
self.output_node = self._initial_output_node(tree, **kwargs)
self.visit(tree)
if hasattr(self.output_node, "set_defaults"):
self.output_node.set_defaults()
assert self.output_node is not None
return self.output_node
def compile_condition(self, condition):
"""Compile a ConditionalNode into a ConditionalValue.
condition: A ConditionalNode"""
data_node = DataNode()
key_value_node = KeyValueNode()
key_value_node.append(condition.copy())
data_node.append(key_value_node)
manifest_item = self.compile(data_node)
return manifest_item._data[None][0]
def _initial_output_node(self, node, **kwargs):
return self.data_cls_getter(None, None)(node, **kwargs)
def visit_DataNode(self, node):
if node != self.tree:
output_parent = self.output_node
self.output_node = self.data_cls_getter(self.output_node, node)(node)
else:
output_parent = None
assert self.output_node is not None
for child in node.children:
self.visit(child)
if output_parent is not None:
# Append to the parent *after* processing all the node data
output_parent.append(self.output_node)
self.output_node = self.output_node.parent
assert self.output_node is not None
def visit_KeyValueNode(self, node):
key_values = []
for child in node.children:
condition, value = self.visit(child)
key_values.append(ConditionalValue(child, condition))
self.output_node._add_key_value(node, key_values)
def visit_ListNode(self, node):
        return (lambda x: True, [self.visit(child) for child in node.children])
def visit_ValueNode(self, node):
return (lambda x: True, node.data)
def visit_AtomNode(self, node):
return (lambda x: True, node.data)
def visit_ConditionalNode(self, node):
return self.visit(node.children[0]), self.visit(node.children[1])
def visit_StringNode(self, node):
indexes = [self.visit(child) for child in node.children]
def value(x):
rv = node.data
for index in indexes:
rv = rv[index(x)]
return rv
return value
def visit_NumberNode(self, node):
if "." in node.data:
return lambda x: float(node.data)
else:
return lambda x: int(node.data)
def visit_VariableNode(self, node):
indexes = [self.visit(child) for child in node.children]
def value(x):
data = x[node.data]
for index in indexes:
data = data[index(x)]
return data
return value
def visit_IndexNode(self, node):
assert len(node.children) == 1
return self.visit(node.children[0])
def visit_UnaryExpressionNode(self, node):
assert len(node.children) == 2
operator = self.visit(node.children[0])
operand = self.visit(node.children[1])
return lambda x: operator(operand(x))
def visit_BinaryExpressionNode(self, node):
assert len(node.children) == 3
operator = self.visit(node.children[0])
operand_0 = self.visit(node.children[1])
operand_1 = self.visit(node.children[2])
assert operand_0 is not None
assert operand_1 is not None
return lambda x: operator(operand_0(x), operand_1(x))
def visit_UnaryOperatorNode(self, node):
return {"not": operator.not_}[node.data]
def visit_BinaryOperatorNode(self, node):
return {"and": operator.and_,
"or": operator.or_,
"==": operator.eq,
"!=": operator.ne}[node.data]
class ManifestItem(object):
def __init__(self, node=None, **kwargs):
self.node = node
self.parent = None
self.children = []
self._data = {}
def __repr__(self):
return "<ManifestItem %s>" % (self.node.data)
def __str__(self):
rv = [repr(self)]
for item in self.children:
rv.extend(" %s" % line for line in str(item).split("\n"))
return "\n".join(rv)
def __contains__(self, key):
return key in self._data
@property
def is_empty(self):
if self._data:
return False
return all(child.is_empty for child in self.children)
@property
def root(self):
node = self
while node.parent is not None:
node = node.parent
return node
@property
def name(self):
return self.node.data
def has_key(self, key):
for node in [self, self.root]:
if key in node._data:
return True
return False
def get(self, key, run_info=None):
if run_info is None:
run_info = {}
for node in [self, self.root]:
if key in node._data:
for cond_value in node._data[key]:
try:
matches = cond_value(run_info)
except KeyError:
matches = False
if matches:
return cond_value.value
raise KeyError
def set(self, key, value, condition=None):
# First try to update the existing value
if key in self._data:
cond_values = self._data[key]
for cond_value in cond_values:
if cond_value.condition_node == condition:
cond_value.value = value
return
# If there isn't a conditional match reuse the existing KeyValueNode as the
# parent
node = None
for child in self.node.children:
if child.data == key:
node = child
break
assert node is not None
else:
node = KeyValueNode(key)
self.node.append(node)
value_node = ValueNode(value)
if condition is not None:
conditional_node = ConditionalNode()
conditional_node.append(condition)
conditional_node.append(value_node)
node.append(conditional_node)
cond_value = Compiler().compile_condition(conditional_node)
else:
node.append(value_node)
cond_value = ConditionalValue(value_node, lambda x: True)
# Update the cache of child values. This is pretty annoying and maybe
# it should just work directly on the tree
if key not in self._data:
self._data[key] = []
if self._data[key] and self._data[key][-1].condition_node is None:
self._data[key].insert(len(self._data[key]) - 1, cond_value)
else:
self._data[key].append(cond_value)
def _add_key_value(self, node, values):
"""Called during construction to set a key-value node"""
self._data[node.data] = values
def append(self, child):
self.children.append(child)
child.parent = self
if child.node.parent != self.node:
self.node.append(child.node)
return child
def remove(self):
if self.parent:
self.parent._remove_child(self)
def _remove_child(self, child):
self.children.remove(child)
child.parent = None
def iterchildren(self, name=None):
for item in self.children:
if item.name == name or name is None:
yield item
def _flatten(self):
rv = {}
for node in [self, self.root]:
for name, value in node._data.iteritems():
if name not in rv:
rv[name] = value
return rv
def iteritems(self):
for item in self._flatten().iteritems():
yield item
def iterkeys(self):
for item in self._flatten().iterkeys():
yield item
def remove_value(self, key, value):
self._data[key].remove(value)
if not self._data[key]:
del self._data[key]
value.remove()
def compile_ast(ast, data_cls_getter=None, **kwargs):
return Compiler().compile(ast, data_cls_getter=data_cls_getter, **kwargs)
def compile(stream, data_cls_getter=None, **kwargs):
return compile_ast(parse(stream),
data_cls_getter=data_cls_getter,
**kwargs)
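# A minimal sketch (assumed wptmanifest syntax) of the entry point above:
# compile() turns a manifest document into a ManifestItem tree whose values
# are resolved against run_info when queried.
#
#     from io import BytesIO
#     src = b'[test.html]\n  expected:\n    if os == "linux": FAIL\n    TIMEOUT\n'
#     root = compile(BytesIO(src))
#     item = root.children[0]                   # the [test.html] section
#     item.get("expected", {"os": "linux"})     # -> "FAIL"
#     item.get("expected", {"os": "win"})       # -> "TIMEOUT"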
| anthgur/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py | Python | mpl-2.0 | 10,484 | ["VisIt"] | ff2d4db9e4954a2dd372bdb629ba516eed77c905ad296e46d4f57c3baafad44a |
|
"""
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
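# A minimal sketch (not part of this module) of the Tee above: stdout can be
# mirrored into a buffer while still echoing to the real stream, which is how
# the example runner below captures script output.
#
#     buf = StringIO()
#     tee = Tee(sys.stdout, buf)
#     print('hello', file=tee)    # shows on screen and lands in buf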
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
        with open(url, 'r') as fid:
            data = fid.read()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
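# A minimal sketch (assumed searchindex.js shape) of the ad-hoc parsing
# above: the "objects:" block is a brace-delimited structure of "key":value
# pairs whose values are int lists or nested dicts, so a fragment like
#
#     objects:{"sklearn.svm":{"SVC":[12,0]}}
#
# parses (without a real JSON parser) into
#
#     {'sklearn.svm': {'SVC': [12, 0]}}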
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gpr_co2_001.png': (1, 350),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet, is_backref=False):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. only:: html\n\n')
out.append(' .. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
if is_backref:
out.append('.. only:: not html\n\n * :ref:`example_%s`' % ref_name)
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet, is_backref=True))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
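# A minimal sketch (not part of this module) of the shortening above: the
# loop drops trailing components while the import keeps working, so the
# fully-qualified definition site collapses to the name users import from.
#
#     >>> get_short_module_name('numpy.core.fromnumeric', 'ravel')
#     'numpy'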
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
    print('Embedding documentation hyperlinks in examples...')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
                with open(pickle_fname, 'rb') as fid:
                    example_code_obj = pickle.load(fid)
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
|
ssaeger/scikit-learn
|
doc/sphinxext/gen_rst.py
|
Python
|
bsd-3-clause
| 40,192
|
[
"VisIt"
] |
c8cc1613111a522c2144f7dc04650c69cd2c3a25bf444cd4fabb4e60991dfdd4
|
#!/usr/bin/env python
import os
import sys
import requests
import requests_cache
from bs4 import BeautifulSoup
from pprint import pprint
from lib.csvtools import dict_to_csv
def main():
requests_cache.install_cache('scraper_cache')
mfers = {}
website = 'http://southcarolinasccoc.weblinkconnect.com/Manufacturing'
r = requests.get(website)
soup = BeautifulSoup(r.text, 'html.parser')
## Get the manufacturer names and addresses from the json script
scripts = soup.findAll('script')
for script in scripts:
if 'var listingLatitude' in script.text:
lines = script.text.split('\n')
for line in lines:
if 'addressesToMap' in line:
continue
if not line.strip():
continue
if line.startswith('var'):
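                        # the slice below skips the fixed-length "var ..." JS
                        # prefix; the offset 135 is site-specific and was
                        # presumably found by inspecting the page source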
line = line[135:]
#print line
try:
name = line.split(';')[2].split('>')[2].split('<')[0]
except Exception as e:
print e
print line
import pdb; pdb.set_trace()
street = line.split(';')[2].split('>')[4].split('<')[0]
city = line.split(';')[2].split('>')[5].split('<')[0].split(',')[0]
state = line.split(';')[2].split('>')[5].split('<')[0].split(',')[1].replace(' ', '').strip()
zipcode = line.split(';')[4].split('<')[0]
#import pdb; pdb.set_trace()
mfers[name] = {}
mfers[name]['name'] = name
mfers[name]['street'] = street
mfers[name]['city'] = city
mfers[name]['state'] = state
mfers[name]['zipcode'] = zipcode
mfers[name]['phone'] = ""
mfers[name]['contact'] = ""
mfers[name]['website'] = ""
## Get the contact names and phone numbers from the ListingResults divs
listresults1 = soup.findAll('div', {'class': 'ListingResults_Level1_CONTAINER'})
listresults2 = soup.findAll('div', {'class': 'ListingResults_Level2_CONTAINER'})
listresults3 = soup.findAll('div', {'class': 'ListingResults_Level3_CONTAINER'})
listresults4 = soup.findAll('div', {'class': 'ListingResults_Level4_CONTAINER'})
all_listresults = listresults1 + listresults2 + listresults3 + listresults4
for lr1 in all_listresults:
#print lr1
mfername = ""
phone_num = ""
contact = ""
## use the hrefs+imgs to figure out the mfer name in this cell
links = lr1.findAll('a')
for link in links:
badwords = ['learn more', 'visit site', 'show on map']
if link.text.lower() in badwords:
continue
href = link.attrs['href']
mfername = link.find('img').attrs['alt']
break
            ## the listing divs contain more names than the js data; create stub entries for the extras
if mfername not in mfers:
mfers[mfername] = {}
mfers[mfername]['name'] = mfername
mfers[mfername]['street'] = ""
mfers[mfername]['city'] = ""
mfers[mfername]['state'] = ""
mfers[mfername]['zipcode'] = ""
mfers[mfername]['phone'] = ""
mfers[mfername]['contact'] = ""
mfers[mfername]['website'] = ""
#import pdb; pdb.set_trace()
## get the location if not already known
if not mfers[mfername]['state']:
addydiv = lr1.find('div', {'itemprop': 'address'})
mfers[mfername]['street'] = addydiv.find('span', {'itemprop': 'street-address'}).text
mfers[mfername]['city'] = addydiv.find('span', {'itemprop': 'locality'}).text
mfers[mfername]['state'] = addydiv.find('span', {'itemprop': 'region'}).text
mfers[mfername]['zipcode'] = addydiv.find('span', {'itemprop': 'postal-code'}).text
#import pdb; pdb.set_trace()
## phone number is set as an image (not all have a phone)...
try:
phone_img = lr1.find('img', {'src': '/external/wcpages/images/phone.gif'})
phone_num = phone_img.text.strip().encode('ascii', 'ignore')
mfers[mfername]['phone'] = phone_num
except Exception as e:
pass
## set the website
try:
mfers[mfername]['website'] = lr1.find('a', {'target': '_blank'}).attrs['href']
except Exception as e:
#print e
#import pdb; pdb.set_trace()
pass
# ListingResults_Level3_MAINCONTACT
try:
mfers[mfername]['contact'] = lr1.find('div', {'class': 'ListingResults_Level3_MAINCONTACT'}).text
except Exception as e:
pass
#if mfername == 'Bose Corporation':
# import pdb; pdb.set_trace()
####################################################
# CSV PRINT
####################################################
#pprint(mfers)
mfnames = sorted(mfers.keys())
keys = mfers[mfnames[0]].keys()
keys = [x for x in keys if x != 'name']
print "manufacturer," + ','.join(keys)
for k in mfnames:
v = mfers[k]
sys.stdout.write('"' + k + '"' + ',')
for key in keys:
sys.stdout.write('"' + v.get(key, "") + '"' + ',')
sys.stdout.write('\n')
#import pdb; pdb.set_trace()
dict_to_csv(mfers, 'manufacturers.csv')
if __name__ == "__main__":
main()
|
jctanner/scrapers
|
manufacturers-scraper.py
|
Python
|
apache-2.0
| 5,528
|
[
"VisIt"
] |
6bac9c1f4e356cf69da250c3d0be42357aab0bcd5e12f5916dd9203a95006f13
|
# -*- coding: utf-8 -*-
"""
Django-Select2 Widgets.
These components are responsible for rendering
the necessary HTML data markups. Since this whole
package exists to render choices using the Select2 JavaScript
library, these components are meant to be used
with choice fields.
Widgets are generally of three types:
1. **Light** --
They are not meant to be used when there
are too many options, say, in thousands.
This is because all those options would
have to be pre-rendered onto the page
and JavaScript would be used to search
    through them. That said, they are also among
    the easiest to use. They are a
    drop-in replacement for Django's default
    select widgets.
2. **Heavy** --
They are suited for scenarios when the number of options
are large and need complex queries (from maybe different
sources) to get the options.
This dynamic fetching of options undoubtedly requires
Ajax communication with the server. Django-Select2 includes
a helper JS file which is included automatically,
so you need not worry about writing any Ajax related JS code.
Although on the server side you do need to create a view
specifically to respond to the queries.
3. **Model** --
    Model widgets are further specialized versions of the heavy widgets.
These do not require views to serve Ajax requests.
When they are instantiated, they register themselves
with one central view which handles Ajax requests for them.
Heavy widgets have the word 'Heavy' in their name.
Light widgets are named normally, i.e. there is no
'Light' in their names.
.. inheritance-diagram:: django_select2.forms
:parts: 1
"""
from __future__ import absolute_import, unicode_literals
from functools import reduce
from itertools import chain
from django import forms
from django.core import signing
from django.core.urlresolvers import reverse_lazy
from django.db.models import Q
from django.forms.models import ModelChoiceIterator
from django.utils.encoding import force_text
from django.contrib.staticfiles.templatetags.staticfiles import static
from .conf import settings
class Select2Mixin(object):
"""
The base mixin of all Select2 widgets.
This mixin is responsible for rendering the necessary
data attributes for select2 as well as adding the static
form media.
"""
def build_attrs(self, extra_attrs=None, **kwargs):
"""Add select2 data attributes."""
attrs = super(Select2Mixin, self).build_attrs(extra_attrs=extra_attrs, **kwargs)
if self.is_required:
attrs.setdefault('data-allow-clear', 'false')
else:
attrs.setdefault('data-allow-clear', 'true')
attrs.setdefault('data-placeholder', '')
attrs.setdefault('data-minimum-input-length', 0)
if 'class' in attrs:
attrs['class'] += ' django-select2'
else:
attrs['class'] = 'django-select2'
return attrs
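    # Illustrative (not from the original source): for a required field the
    # attributes built above render roughly as
    #   <select class="django-select2" data-allow-clear="false"
    #           data-placeholder="" data-minimum-input-length="0" ...>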
def render_options(self, choices, selected_choices):
"""Render options including an empty one, if the field is not required."""
output = '<option></option>' if not self.is_required else ''
output += super(Select2Mixin, self).render_options(choices, selected_choices)
return output
def _get_media(self):
"""
Construct Media as a dynamic property.
.. Note:: For more information visit
https://docs.djangoproject.com/en/1.8/topics/forms/media/#media-as-a-dynamic-property
"""
return forms.Media(
js=(settings.SELECT2_JS_URL,
static('django_select2/django_select2.js')),
css={'screen': (settings.SELECT2_CSS_URL,)}
)
media = property(_get_media)
class Select2TagMixin(object):
"""Mixin to add select2 tag functionality."""
def build_attrs(self, extra_attrs=None, **kwargs):
"""Add select2's tag attributes."""
self.attrs.setdefault('data-minimum-input-length', 1)
self.attrs.setdefault('data-tags', 'true')
self.attrs.setdefault('data-token-separators', [",", " "])
return super(Select2TagMixin, self).build_attrs(extra_attrs, **kwargs)
class Select2Widget(Select2Mixin, forms.Select):
"""
Select2 drop in widget.
Example usage::
class MyModelForm(forms.ModelForm):
class Meta:
model = MyModel
fields = ('my_field', )
widgets = {
'my_field': Select2Widget
}
or::
class MyForm(forms.Form):
my_choice = forms.ChoiceField(widget=Select2Widget)
"""
pass
class Select2MultipleWidget(Select2Mixin, forms.SelectMultiple):
"""
Select2 drop in widget for multiple select.
Works just like :class:`.Select2Widget` but for multi select.
"""
pass
class Select2TagWidget(Select2TagMixin, Select2Mixin, forms.SelectMultiple):
"""
    Select2 drop-in widget for tagging.
Example for :class:`.django.contrib.postgres.fields.ArrayField`::
class MyWidget(Select2TagWidget):
def value_from_datadict(self, data, files, name):
                values = super(MyWidget, self).value_from_datadict(data, files, name)
return ",".join(values)
"""
pass
class HeavySelect2Mixin(Select2Mixin):
"""Mixin that adds select2's ajax options."""
def __init__(self, **kwargs):
"""
Return HeavySelect2Mixin.
:param data_view: url pattern name
:type data_view: str
:param data_url: url
:type data_url: str
:return:
"""
self.data_view = kwargs.pop('data_view', None)
self.data_url = kwargs.pop('data_url', None)
if not (self.data_view or self.data_url):
            raise ValueError('You must either specify "data_view" or "data_url".')
self.userGetValTextFuncName = kwargs.pop('userGetValTextFuncName', 'null')
super(HeavySelect2Mixin, self).__init__(**kwargs)
def get_url(self):
"""Return url from instance or by reversing :attr:`.data_view`."""
if self.data_url:
return self.data_url
return reverse_lazy(self.data_view)
def build_attrs(self, extra_attrs=None, **kwargs):
"""Set select2's ajax attributes."""
attrs = super(HeavySelect2Mixin, self).build_attrs(extra_attrs=extra_attrs, **kwargs)
        # sign the instance id so the Ajax view can safely identify this widget
self.widget_id = signing.dumps(id(self))
attrs['data-field_id'] = self.widget_id
attrs.setdefault('data-ajax--url', self.get_url())
attrs.setdefault('data-ajax--cache', "true")
attrs.setdefault('data-ajax--type', "GET")
attrs.setdefault('data-minimum-input-length', 2)
attrs['class'] += ' django-select2-heavy'
return attrs
def render_options(self, choices, selected_choices):
"""Render only selected options."""
output = ['<option></option>' if not self.is_required else '']
        if isinstance(self.choices, ModelChoiceIterator):
            # Restrict the queryset to the selected primary keys only.
            self.choices.queryset = self.choices.queryset.filter(
                pk__in=[i for i in selected_choices if isinstance(i, int)])
            choices = set(self.choices)
        else:
            # Merge the widget's own selected choices with any extra choices
            # passed in by the caller.
            extra_choices = choices
            choices = {(k, v) for k, v in self.choices if k in selected_choices}
            choices.update((k, v) for k, v in extra_choices if k in selected_choices)
selected_choices = {force_text(v) for v in selected_choices}
for option_value, option_label in choices:
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
class HeavySelect2Widget(HeavySelect2Mixin, forms.Select):
"""
Select2 widget with AJAX support.
Usage example::
        class MyWidget(HeavySelect2Widget):
data_view = 'my_view_name'
or::
class MyForm(forms.Form):
            my_field = forms.ChoiceField(
                widget=HeavySelect2Widget(
data_url='/url/to/json/response'
)
)
"""
pass
class HeavySelect2MultipleWidget(HeavySelect2Mixin, forms.SelectMultiple):
"""Select2 multi select widget similar to :class:`.HeavySelect2Widget`."""
pass
class HeavySelect2TagWidget(Select2TagMixin, HeavySelect2MultipleWidget):
"""Select2 tag widget."""
pass
# Auto Heavy widgets
class ModelSelect2Mixin(object):
"""Widget mixin that provides attributes and methods for :class:`.AutoResponseView`."""
model = None
queryset = None
search_fields = []
"""
Model lookups that are used to filter the queryset.
Example::
search_fields = [
'title__icontains',
]
"""
max_results = 25
"""Maximal results returned by :class:`.AutoResponseView`."""
def __init__(self, *args, **kwargs):
"""
Overwrite class parameters if passed as keyword arguments.
:param model: model to select choices from
:type model: django.db.models.Model
:param queryset: queryset to select choices from
:type queryset: django.db.models.query.QuerySet
:param search_fields: list of model lookup strings
:type search_fields: list
:param max_results: max. JsonResponse view page size
:type max_results: int
"""
self.model = kwargs.pop('model', self.model)
self.queryset = kwargs.pop('queryset', self.queryset)
self.search_fields = kwargs.pop('search_fields', self.search_fields)
self.max_results = kwargs.pop('max_results', self.max_results)
defaults = {'data_view': 'django_select2-json'}
defaults.update(kwargs)
super(ModelSelect2Mixin, self).__init__(*args, **defaults)
def filter_queryset(self, term, queryset=None):
"""
Return queryset filtered by search_fields matching the passed term.
:param term: Search term
:type term: str
:return: Filtered queryset
:rtype: :class:`.django.db.models.QuerySet`
"""
if queryset is None:
queryset = self.get_queryset()
search_fields = self.get_search_fields()
select = Q()
term = term.replace('\t', ' ')
term = term.replace('\n', ' ')
for t in [t for t in term.split(' ') if not t == '']:
select &= reduce(lambda x, y: x | Q(**{y: t}), search_fields,
Q(**{search_fields[0]: t}))
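        # Illustrative example (not in the original source): with
        # search_fields = ['title__icontains', 'body__icontains'] and
        # term = "foo bar", `select` is equivalent to
        #   (Q(title__icontains='foo') | Q(body__icontains='foo')) &
        #   (Q(title__icontains='bar') | Q(body__icontains='bar'))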
return queryset.filter(select).distinct()
def get_queryset(self):
"""
Return queryset based on :attr:`.queryset` or :attr:`.model`.
:return: queryset of available choices
:rtype: :class:`.django.db.models.QuerySet`
"""
if self.queryset is not None:
queryset = self.queryset
elif self.model is not None:
queryset = self.model._default_manager.all()
else:
raise NotImplementedError(
"%(cls)s is missing a QuerySet. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {
'cls': self.__class__.__name__
}
)
return queryset
def get_search_fields(self):
"""Return list of lookup names."""
if self.search_fields:
return self.search_fields
        raise NotImplementedError('%s must implement "search_fields".' % self.__class__.__name__)
def render_options(self, choices, selected_choices):
"""Render only selected options and set queryset from :class:`ModelChoicesIterator`."""
output = ['<option></option>' if not self.is_required else '']
if isinstance(self.choices, ModelChoiceIterator):
if not self.queryset:
self.queryset = self.choices.queryset
selected_choices = {c for c in selected_choices
if c not in self.choices.field.empty_values}
choices = {self.choices.choice(obj)
for obj in self.choices.queryset.filter(pk__in=selected_choices)}
else:
choices = chain(choices, self.choices)
choices = {(k, v) for k, v in choices if k in selected_choices}
selected_choices = {force_text(v) for v in selected_choices}
for option_value, option_label in choices:
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
class ModelSelect2Widget(ModelSelect2Mixin, HeavySelect2Widget):
"""
Select2 drop in model select widget.
Example usage::
class MyWidget(ModelSelect2Widget):
search_fields = [
'title__icontains',
]
class MyModelForm(forms.ModelForm):
class Meta:
model = MyModel
fields = ('my_field', )
widgets = {
'my_field': MyWidget,
}
or::
class MyForm(forms.Form):
my_choice = forms.ChoiceField(
widget=ModelSelect2Widget(
model=MyOtherModel,
search_fields=['title__icontains']
)
)
    .. tip:: The ModelSelect2(Multiple)Widget will try
        to get the queryset from the field's choices.
        Therefore you don't need to define a queryset
        if you just drop in the widget for a ForeignKey field.
"""
pass
class ModelSelect2MultipleWidget(ModelSelect2Mixin, HeavySelect2MultipleWidget):
"""
Select2 drop in model multiple select widget.
Works just like :class:`.ModelSelect2Widget` but for multi select.
"""
pass
class ModelSelect2TagWidget(Select2TagMixin, ModelSelect2MultipleWidget):
"""
Select2 model widget with tag support.
    This is not a simple drop-in widget.
    It requires you to implement your own :func:`.value_from_datadict`
    that adds missing tags to your queryset.
    Example::
        class MyModelSelect2TagWidget(ModelSelect2TagWidget):
            queryset = MyModel.objects.all()
            def value_from_datadict(self, data, files, name):
                values = super(MyModelSelect2TagWidget, self).value_from_datadict(data, files, name)
                qs = self.queryset.filter(**{'pk__in': list(values)})
                pks = set(force_text(o.pk) for o in qs)
                cleaned_values = []
                for val in values:
                    if force_text(val) not in pks:
                        val = self.queryset.create(title=val).pk
                    cleaned_values.append(val)
                return cleaned_values
"""
pass
|
DMOJ/django-select2
|
django_select2/forms.py
|
Python
|
apache-2.0
| 14,773
|
[
"VisIt"
] |
e8199fe9742e0fddf89c190d9d019c09ed448bc1eaa9509ead7f2187377cdf4b
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.modeling.modeler Contains the Modeler class, which selects the appropriate modeler and runs it.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ...core.basics.configurable import Configurable
from ...core.basics.log import log
from ...core.tools import filesystem as fs
from ..component.component import get_config_file_path, load_modeling_configuration
from .galaxy import GalaxyModeler
from .sed import SEDModeler
from .images import ImagesModeler
from ..core.steps import galaxy_modeling, sed_modeling, images_modeling
# -----------------------------------------------------------------
class Modeler(Configurable):
"""
    Selects the appropriate modeler (galaxy, SED or images modeling) and runs it.
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
"""
# Call the constructor of the base class
super(Modeler, self).__init__(*args, **kwargs)
# The modeling path
self.modeling_path = None
# The modeling configuration
self.modeling_config = None
# The specific modeler
self.modeler = None
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Perform the modeling
self.model(**kwargs)
# -----------------------------------------------------------------
@property
def galaxy_modeling(self):
"""
This function ...
:return:
"""
return self.modeling_config.modeling_type == galaxy_modeling
# -----------------------------------------------------------------
@property
def sed_modeling(self):
"""
This function ...
:return:
"""
return self.modeling_config.modeling_type == sed_modeling
# -----------------------------------------------------------------
@property
def images_modeling(self):
"""
This function ...
:return:
"""
return self.modeling_config.modeling_type == images_modeling
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
        # Call the setup function of the base class
super(Modeler, self).setup(**kwargs)
# Set the path to the modeling directory
self.modeling_path = self.config.path
# Check for the presence of the configuration file
if not fs.is_file(get_config_file_path(self.modeling_path)): raise ValueError("The current working directory (" + self.config.path + ") is not a radiative transfer modeling directory (the configuration file is missing)")
else: self.modeling_config = load_modeling_configuration(self.modeling_path)
# -----------------------------------------------------------------
def model(self, **kwargs):
"""
This function ...
:return:
"""
# Inform the user
log.info("Performing the modeling ...")
# Debugging
log.debug("Modeling type: " + self.modeling_config.modeling_type)
# Galaxy modeling
if self.galaxy_modeling: self.model_galaxy(**kwargs)
# SED modeling
elif self.sed_modeling: self.model_sed(**kwargs)
# Images modeling
elif self.images_modeling: self.model_images(**kwargs)
# -----------------------------------------------------------------
def model_galaxy(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Create galaxy modeler
self.modeler = GalaxyModeler(self.config)
# Run the modeler
self.modeler.run(**kwargs)
# -----------------------------------------------------------------
def model_sed(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Create SED modeler
self.modeler = SEDModeler(self.config)
# Run the modeler
self.modeler.run(**kwargs)
# -----------------------------------------------------------------
def model_images(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Create images modeler
self.modeler = ImagesModeler(self.config)
# Run the modeler
self.modeler.run(**kwargs)
# -----------------------------------------------------------------
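# Example use (a sketch only; in practice the modeler is driven by the PTS
# command-line framework, and run() is inherited from Configurable):
#   modeler = Modeler(config)
#   modeler.run()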
|
SKIRT/PTS
|
modeling/modeling/modeler.py
|
Python
|
agpl-3.0
| 5,083
|
[
"Galaxy"
] |
40068662f859f9a355baea4e47934dd7cec2100f55434cc5fe8c860f19bf65af
|
Data.days = 7
Data.members = q.QueryCount(
"MemberStatusId = 10[Member]")
Data.uniqueAttends = q.QueryCount(
"RecentAttendCount( Days=7 ) > 0")
Data.newAttends = q.QueryCount(
"HasRecentNewAttend( Days=7, NumberOfDaysForNoAttendance='365' ) = 1[True]")
Data.meetings = q.MeetingCount(Data.days, 0, 0, 0)
Data.numPresent = q.NumPresent(Data.days, 0, 0, 0)
Data.decisions = q.QueryCount("""
RecentDecisionType( Days=7 ) IN (
0[Unknown]
,10[POF for Membership]
,20[POF NOT for Membership]
,30[Letter], 40[Statement]
,50[Stmt requiring Baptism]
)""")
Data.contacts = q.QueryCount("""
RecentContactType( Days=7 ) IN (
4[Card Sent]
,5[EMail Sent]
,6[Info Pack Sent]
,3[Letter Sent]
,7[Other]
,1[Personal Visit]
,2[Phone Call]
,99[Unknown]
)""")
Data.registrations = q.QueryCount("""
RecentRegistrationType( Days=7 ) IN (
1[Join Organization]
, 10[User Selects Organization]
, 11[Compute Org By Birthday]
, 15[Manage Subscriptions]
, 14[Manage Recurring Giving]
, 8[Online Giving]
, 9[Online Pledge]
, 16[Special Script]
)""")
fund = 0 # 0 is for all funds
week = 7
weeksinyear = 52
year = weeksinyear * week
oneweekago = week
twoweeksago = week * 2
fiveweeksago = week * 5
oneyearago = year + oneweekago
twoyearsago = year * 2 + oneweekago
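# Example of the window arithmetic used below: with week = 7,
# fiveweeksago is 35 and oneweekago is 7, so
# ContributionTotals(fiveweeksago, oneweekago, fund) / 4 averages the four
# complete weeks preceding the most recent week.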
Data.cnAmtPrev7 = q.ContributionTotals(twoweeksago, oneweekago, fund)
Data.cnCntPrev7 = q.ContributionCount(twoweeksago, oneweekago, fund)
tcount = q.ContributionCount(oneyearago, oneweekago, fund)
Data.cnAvgAmtPerDonorYear = \
q.ContributionTotals(oneyearago, oneweekago, fund) \
/ tcount if tcount > 0 else 0
Data.cnWeekly4WeekAvg = \
q.ContributionTotals(fiveweeksago, oneweekago, fund) / 4
Data.cnWeeklyAvgCurrYear = \
q.ContributionTotals(oneyearago, oneweekago, fund) / weeksinyear
Data.cnWeeklyAvgPrevYear = \
q.ContributionTotals(twoyearsago, oneyearago, fund) / weeksinyear
Data.cnDateRangeCurrYear = \
q.DateRangeForContributionTotals(oneyearago, oneweekago)
Data.cnDateRangePrevYear = \
q.DateRangeForContributionTotals(twoyearsago, oneyearago)
template = """
<style>
#vitalStats { width:auto; margin-left:auto; margin-right:auto; }
#vitalStats td { text-align: right; }
</style>
<table id="vitalStats" class="table">
<tr><th colspan="2">Counts for past {{days}} days</th></tr>
<tr><td>Members</td>
<td>{{Fmt members "N0"}}</td></tr>
<tr><td>Decisions</td>
<td>{{Fmt decisions "N0"}}</td></tr>
<tr><td>Meetings</td>
<td>{{Fmt meetings "N0"}}</td></tr>
<tr><td>Sum of Present in Meetings</td>
<td>{{Fmt numPresent "N0"}}</td></tr>
<tr><td>Unique Attends</td>
<td>{{Fmt uniqueAttends "N0"}}</td></tr>
<tr><td>New Attends</td>
<td>{{Fmt newAttends "N0"}}</td></tr>
<tr><td>Contacts</td>
<td>{{Fmt contacts "N0"}}</td></tr>
<tr><td>Registrations</td>
<td>{{Fmt registrations "N0"}}</td></tr>
<tr><th colspan="2">Contributions-All Funds</th></tr>
<tr><td>Average Gift Size</td>
    <td>{{Fmt cnAvgAmtPerDonorYear "N2"}}</td></tr>
<tr><td>Weekly average past 4 weeks</td>
<td>{{Fmt cnWeekly4WeekAvg "N2"}}</td></tr>
<tr><td>Weekly average current year</td>
<td>{{Fmt cnWeeklyAvgCurrYear "N2"}}</td>
<td>{{cnDateRangeCurrYear}}</td></tr>
<tr><td>Weekly average previous year</td>
<td>{{Fmt cnWeeklyAvgPrevYear "N2"}}</td>
<td>{{cnDateRangePrevYear}}</td></tr>
</table>
"""
print model.RenderTemplate(template)
|
RGray1959/MyParish
|
CmsWeb/Content/VitalStats.py
|
Python
|
gpl-2.0
| 3,801
|
[
"VisIt"
] |
060f5a1f2bb80203b36a4689e5df4f317955d3656f637692ece07efe6b524e16
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import pytest
import os.path as osp
from sisl import Geometry, Atom
from sisl.io.vasp.car import *
import numpy as np
pytestmark = [pytest.mark.io, pytest.mark.vasp]
_dir = osp.join('sisl', 'io', 'vasp')
def test_geometry_car_mixed(sisl_tmp):
f = sisl_tmp('test_read_write.POSCAR', _dir)
atoms = [Atom[1],
Atom[2],
Atom[2],
Atom[1],
Atom[1],
Atom[2],
Atom[3]]
xyz = np.random.rand(len(atoms), 3)
geom = Geometry(xyz, atoms, 100)
geom.write(carSileVASP(f, 'w'))
assert carSileVASP(f).read_geometry() == geom
def test_geometry_car_group(sisl_tmp):
f = sisl_tmp('test_sort.POSCAR', _dir)
atoms = [Atom[1],
Atom[2],
Atom[2],
Atom[1],
Atom[1],
Atom[2],
Atom[3]]
xyz = np.random.rand(len(atoms), 3)
geom = Geometry(xyz, atoms, 100)
geom.write(carSileVASP(f, 'w'), group_species=True)
assert carSileVASP(f).read_geometry() != geom
geom = carSileVASP(f).geometry_group(geom)
assert carSileVASP(f).read_geometry() == geom
def test_geometry_car_allsame(sisl_tmp):
f = sisl_tmp('test_read_write.POSCAR', _dir)
atoms = Atom[1]
xyz = np.random.rand(10, 3)
geom = Geometry(xyz, atoms, 100)
geom.write(carSileVASP(f, 'w'))
assert carSileVASP(f).read_geometry() == geom
def test_geometry_car_dynamic(sisl_tmp):
f = sisl_tmp('test_dynamic.POSCAR', _dir)
atoms = Atom[1]
xyz = np.random.rand(10, 3)
geom = Geometry(xyz, atoms, 100)
read = carSileVASP(f)
# no dynamic (direct geometry)
geom.write(carSileVASP(f, 'w'), dynamic=None)
g, dyn = read.read_geometry(ret_dynamic=True)
assert dyn is None
geom.write(carSileVASP(f, 'w'), dynamic=False)
g, dyn = read.read_geometry(ret_dynamic=True)
assert not np.any(dyn)
geom.write(carSileVASP(f, 'w'), dynamic=True)
g, dyn = read.read_geometry(ret_dynamic=True)
assert np.all(dyn)
dynamic = [False] * len(geom)
dynamic[0] = [True, False, True]
geom.write(carSileVASP(f, 'w'), dynamic=dynamic)
g, dyn = read.read_geometry(ret_dynamic=True)
assert np.array_equal(dynamic[0], dyn[0])
assert not np.any(dyn[1:])
|
zerothi/sisl
|
sisl/io/vasp/tests/test_car.py
|
Python
|
mpl-2.0
| 2,477
|
[
"VASP"
] |
73476be61989d610222e34bfeaff097754efc985478a778cf2c1c73bfaa992bf
|
"""Simple tight-binding add-on for GPAWs LCAO module."""
from math import pi
import numpy as np
import scipy.linalg as sla
import ase.units as units
from gpaw.utilities.tools import tri2full
class TightBinding:
"""Simple class for tight-binding calculations."""
def __init__(self, atoms, calc):
"""Init with ``Atoms`` and a converged LCAO calculation."""
# Store and extract useful attributes from the calculator
self.atoms = atoms
self.calc = calc
self.kd = calc.wfs.kd
wfs = calc.wfs
kd = wfs.kd
kpt_u = wfs.kpt_u
# Matrix size
self.nao = wfs.setups.nao
# K-point info
self.gamma = kd.gamma
if self.gamma:
self.Nk_c = (1, 1, 1)
else:
self.Nk_c = tuple(kd.N_c)
self.ibzk_kc = kd.ibzk_kc
self.ibzk_qc = kd.ibzk_qc
self.bzk_kc = kd.bzk_kc
# Symmetry
self.symmetry = kd.symmetry
if calc.input_parameters['usesymm'] is True:
            raise NotImplementedError("Only time-reversal symmetry supported.")
# Lattice vectors and number of repetitions
self.R_cN = None
self.N_c = None
# Init with default number of real-space cells
self.set_num_cells()
def set_num_cells(self, N_c=None):
"""Set number of real-space cells to use.
Parameters
----------
N_c: tuple or ndarray
Number of unit cells in each direction of the basis vectors.
"""
if N_c is None:
self.N_c = tuple(self.Nk_c)
else:
self.N_c = tuple(N_c)
if np.any(np.asarray(self.Nk_c) < np.asarray(self.N_c)):
print("WARNING: insufficient k-point sampling.")
# Lattice vectors
R_cN = np.indices(self.N_c).reshape(3, -1)
N_c = np.array(self.N_c)[:, np.newaxis]
R_cN += N_c // 2
R_cN %= N_c
R_cN -= N_c // 2
self.R_cN = R_cN
def lattice_vectors(self):
"""Return real-space lattice vectors."""
return self.R_cN
def bloch_to_real_space(self, A_qxMM, R_c=None):
"""Transform quantity from Bloch to real-space representation.
Parameters
----------
A_qxMM: ndarray
Bloch representation of matrix. May be parallelized over k-points.
R_cN: ndarray
Cell vectors for which the real-space matrices will be calculated
and returned.
"""
# Include all cells per default
if R_c is None:
R_Nc = self.R_cN.transpose()
else:
R_Nc = [R_c]
# Real-space quantities
A_NxMM = []
# Reshape input array
shape = A_qxMM.shape
A_qx = A_qxMM.reshape(shape[0], -1)
# Fourier transform to real-space
for R_c in R_Nc:
# Evaluate fourier sum
phase_q = np.exp(2.j * pi * np.dot(self.ibzk_qc, R_c))
A_x = np.sum(phase_q[:, np.newaxis] * A_qx, axis=0)
self.kd.comm.sum(A_x)
A_xMM = A_x.reshape(shape[1:])
# Time-reversal symmetry
if not len(self.ibzk_kc) == len(self.bzk_kc):
# Broadcast Gamma component
gamma = np.where(np.sum(np.abs(self.ibzk_kc), axis=1) == 0.0)[0]
rank, myu = self.kd.get_rank_and_index(0, gamma)
#
if self.kd.comm.rank == rank[0]:
A0_xMM = A_qxMM[myu[0]]
else:
A0_xMM = np.zeros_like(A_xMM)
#
self.kd.comm.broadcast(A0_xMM, rank[0])
                # Add conjugate and subtract double counted Gamma component
A_xMM += A_xMM.conj() - A0_xMM
A_xMM /= np.prod(self.Nk_c)
            if not np.all(np.abs(A_xMM.imag) < 1e-10):
                raise ValueError("MAX Im(A_MM): % .2e"
                                 % np.amax(np.abs(A_xMM.imag)))
A_NxMM.append(A_xMM.real)
return np.array(A_NxMM)
def h_and_s(self):
"""Return LCAO Hamiltonian and overlap matrix in real-space."""
# Extract Bloch Hamiltonian and overlap matrix
H_kMM = []
S_kMM = []
h = self.calc.hamiltonian
wfs = self.calc.wfs
kpt_u = wfs.kpt_u
for kpt in kpt_u:
H_MM = wfs.eigensolver.calculate_hamiltonian_matrix(h, wfs, kpt)
S_MM = wfs.S_qMM[kpt.q]
#XXX Converting to full matrices here
tri2full(H_MM)
tri2full(S_MM)
H_kMM.append(H_MM)
S_kMM.append(S_MM)
# Convert to arrays
H_kMM = np.array(H_kMM)
S_kMM = np.array(S_kMM)
H_NMM = self.bloch_to_real_space(H_kMM)
S_NMM = self.bloch_to_real_space(S_kMM)
return H_NMM, S_NMM
def band_structure(self, path_kc, blochstates=False):
"""Calculate dispersion along a path in the Brillouin zone.
Parameters
----------
path_kc: ndarray
List of k-point coordinates (in units of the reciprocal lattice
vectors) specifying the path in the Brillouin zone for which the
dynamical matrix will be calculated.
blochstates: bool
Return LCAO expansion coefficients when True (default: False).
"""
# Real-space matrices
self.H_NMM, self.S_NMM = self.h_and_s()
assert self.H_NMM is not None
assert self.S_NMM is not None
# Lattice vectors
R_cN = self.R_cN
# Lists for eigenvalues and eigenvectors along path
eps_kn = []
psi_kn = []
for k_c in path_kc:
# Evaluate fourier sum
phase_N = np.exp(-2.j * pi * np.dot(k_c, R_cN))
H_MM = np.sum(phase_N[:, np.newaxis, np.newaxis] * self.H_NMM,
axis=0)
S_MM = np.sum(phase_N[:, np.newaxis, np.newaxis] * self.S_NMM,
axis=0)
if blochstates:
eps_n, c_Mn = sla.eigh(H_MM, S_MM)
# Sort eigenmodes according to increasing eigenvalues
c_nM = c_Mn[:, eps_n.argsort()].transpose()
psi_kn.append(c_nM)
else:
eps_n = sla.eigvalsh(H_MM, S_MM)
# Sort eigenvalues in increasing order
eps_n.sort()
eps_kn.append(eps_n)
# Convert to eV
eps_kn = np.array(eps_kn) * units.Hartree
if blochstates:
return eps_kn, np.array(psi_kn)
return eps_kn
|
ajylee/gpaw-rtxs
|
gpaw/lcao/tightbinding.py
|
Python
|
gpl-3.0
| 6,810
|
[
"ASE",
"GPAW"
] |
501a037793b92444009d9fea5a642261744721904ad6f0127444c7378c84b717
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Early initialization and main entry point.
qutebrowser's initialization process roughly looks like this:
- This file gets imported, either via the setuptools entry point or
__main__.py.
- At import time, we check for the correct Python version and show an error if
it's too old.
- The main() function in this file gets invoked
- Argument parsing takes place
- earlyinit.early_init() gets invoked to do various low-level initialization
and checks whether all dependencies are met.
- app.run() gets called, which takes over.
See the docstring of app.py for details.
"""
import sys
import json
import qutebrowser
try:
from qutebrowser.misc.checkpyver import check_python_version
except ImportError:
try:
# python2
from .misc.checkpyver import check_python_version
except (SystemError, ValueError):
# Import without module - SystemError on Python3, ValueError (?!?) on
# Python2
sys.stderr.write("Please don't run this script directly, do something "
"like python3 -m qutebrowser instead.\n")
sys.stderr.flush()
sys.exit(100)
check_python_version()
from qutebrowser.utils import log
import argparse # pylint: disable=wrong-import-order
from qutebrowser.misc import earlyinit
def get_argparser():
"""Get the argparse parser."""
parser = argparse.ArgumentParser(prog='qutebrowser',
description=qutebrowser.__description__)
parser.add_argument('-B', '--basedir', help="Base directory for all "
"storage.")
parser.add_argument('-V', '--version', help="Show version and quit.",
action='store_true')
parser.add_argument('-s', '--set', help="Set a temporary setting for "
"this session.", nargs=2, action='append',
dest='temp_settings', default=[],
metavar=('OPTION', 'VALUE'))
parser.add_argument('-r', '--restore', help="Restore a named session.",
dest='session')
parser.add_argument('-R', '--override-restore', help="Don't restore a "
"session even if one would be restored.",
action='store_true')
parser.add_argument('--target', choices=['auto', 'tab', 'tab-bg',
'tab-silent', 'tab-bg-silent',
'window'],
help="How URLs should be opened if there is already a "
"qutebrowser instance running.")
parser.add_argument('--backend', choices=['webkit', 'webengine'],
help="Which backend to use.")
parser.add_argument('--enable-webengine-inspector', action='store_true',
help="Enable the web inspector for QtWebEngine. Note "
"that this is a SECURITY RISK and you should not "
"visit untrusted websites with the inspector turned "
"on. See https://bugreports.qt.io/browse/QTBUG-50725 "
"for more details. This is not needed anymore since "
"Qt 5.11 where the inspector is always enabled and "
"secure.")
parser.add_argument('--json-args', help=argparse.SUPPRESS)
parser.add_argument('--temp-basedir-restarted', help=argparse.SUPPRESS)
debug = parser.add_argument_group('debug arguments')
debug.add_argument('-l', '--loglevel', dest='loglevel',
help="Set loglevel", default='info',
choices=['critical', 'error', 'warning', 'info',
'debug', 'vdebug'])
debug.add_argument('--logfilter', type=logfilter_error,
help="Comma-separated list of things to be logged "
"to the debug log on stdout.")
debug.add_argument('--loglines',
help="How many lines of the debug log to keep in RAM "
"(-1: unlimited).",
default=2000, type=int)
debug.add_argument('-d', '--debug', help="Turn on debugging options.",
action='store_true')
debug.add_argument('--json-logging', action='store_true', help="Output log"
" lines in JSON format (one object per line).")
debug.add_argument('--nocolor', help="Turn off colored logging.",
action='store_false', dest='color')
debug.add_argument('--force-color', help="Force colored logging",
action='store_true')
debug.add_argument('--nowindow', action='store_true', help="Don't show "
"the main window.")
debug.add_argument('-T', '--temp-basedir', action='store_true', help="Use "
"a temporary basedir.")
debug.add_argument('--no-err-windows', action='store_true', help="Don't "
"show any error windows (used for tests/smoke.py).")
debug.add_argument('--qt-arg', help="Pass an argument with a value to Qt. "
"For example, you can do "
"`--qt-arg geometry 650x555+200+300` to set the window "
"geometry.", nargs=2, metavar=('NAME', 'VALUE'),
action='append')
debug.add_argument('--qt-flag', help="Pass an argument to Qt as flag.",
nargs=1, action='append')
debug.add_argument('-D', '--debug-flag', type=debug_flag_error,
default=[], help="Pass name of debugging feature to be"
" turned on.", action='append', dest='debug_flags')
parser.add_argument('command', nargs='*', help="Commands to execute on "
"startup.", metavar=':command')
# URLs will actually be in command
parser.add_argument('url', nargs='*', help="URLs to open on startup "
"(empty as a window separator).")
return parser
def directory(arg):
    """Validate that a directory argument is non-empty."""
    if not arg:
        raise argparse.ArgumentTypeError("Invalid empty value")
    return arg
def logfilter_error(logfilter):
"""Validate logger names passed to --logfilter.
Args:
logfilter: A comma separated list of logger names.
"""
if set(logfilter.lstrip('!').split(',')).issubset(log.LOGGER_NAMES):
return logfilter
else:
raise argparse.ArgumentTypeError(
"filters: Invalid value {} - expected a list of: {}".format(
logfilter, ', '.join(log.LOGGER_NAMES)))
def debug_flag_error(flag):
"""Validate flags passed to --debug-flag.
Available flags:
debug-exit: Turn on debugging of late exit.
pdb-postmortem: Drop into pdb on exceptions.
no-sql-history: Don't store history items.
no-scroll-filtering: Process all scrolling updates.
log-requests: Log all network requests.
"""
valid_flags = ['debug-exit', 'pdb-postmortem', 'no-sql-history',
'no-scroll-filtering', 'log-requests', 'lost-focusproxy']
if flag in valid_flags:
return flag
else:
raise argparse.ArgumentTypeError("Invalid debug flag - valid flags: {}"
.format(', '.join(valid_flags)))
def main():
parser = get_argparser()
argv = sys.argv[1:]
args = parser.parse_args(argv)
if args.json_args is not None:
# Restoring after a restart.
# When restarting, we serialize the argparse namespace into json, and
# construct a "fake" argparse.Namespace here based on the data loaded
# from json.
data = json.loads(args.json_args)
args = argparse.Namespace(**data)
earlyinit.early_init(args)
    # We do these imports late, as earlyinit needs to run first (because of
    # version checking and other early initialization).
from qutebrowser import app
return app.run(args)
|
V155/qutebrowser
|
qutebrowser/qutebrowser.py
|
Python
|
gpl-3.0
| 8,778
|
[
"VisIt"
] |
bec47b6d658dd52ef58a8bfa6e4c52f52fba23293f98e6a00e349fd5cfe4aa0b
|
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2016 the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Test the various population analyses (MPA, LPA, CSPA) in cclib"""
import sys
import os
import logging
import unittest
import numpy
sys.path.append("..")
from test_data import getdatafile
from cclib.method import Orbitals
from cclib.parser import Gaussian
from cclib.parser import Psi
class RestrictedCalculationTest(unittest.TestCase):
"""Check retricted calculation."""
def setUp(self):
self.data, self.logfile = getdatafile(Gaussian, "basicGaussian09", ["dvb_sp.out"])
def test_closed_shell(self):
self.assertTrue(Orbitals(self.data).closed_shell())
class UnrestrictedCalculationTest(unittest.TestCase):
"""Check unrestricted calculation."""
def setUp(self):
self.data, self.logfile = getdatafile(Gaussian, "basicGaussian09", ["dvb_un_sp.log"])
def test_closed_shell(self):
self.assertFalse(Orbitals(self.data).closed_shell())
class RestrictedOpenShellCalculationTest(unittest.TestCase):
"""Check restricted open shell calcualtion."""
def setUp(self):
self.data, self.logfile = getdatafile(Psi, "basicPsi4.0", ["dvb_sp_rohf.out"])
    def test_closed_shell(self):
self.assertFalse(Orbitals(self.data).closed_shell())
# TODO: add a case (regression) with an unrestricted calculation for a closed shell system.
# For example, in regressions: Gaussian/Gaussian03/Mo4OSibdt2
tests = [RestrictedCalculationTest, UnrestrictedCalculationTest]
if __name__ == "__main__":
for test in tests:
thistest = unittest.makeSuite(test)
unittest.TextTestRunner(verbosity=2).run(thistest)
|
gaursagar/cclib
|
test/method/testorbitals.py
|
Python
|
bsd-3-clause
| 2,060
|
[
"Gaussian",
"cclib"
] |
17bd1051550b1cdfc66e7d4c459e6956f4408da526f7e6833b80df52a679cfd2
|
import configparser
import pathlib
import pickle
from PyQt5.QtCore import pyqtSlot, Qt, QTimer, QVariant, QEvent
from PyQt5.QtGui import QIcon, QCloseEvent, QCursor, QShowEvent, \
QWindowStateChangeEvent
from PyQt5.QtWidgets import QWidget, QFrame, QMessageBox, QSystemTrayIcon, \
QScrollArea, QMenu, QAction, QLabel, QVBoxLayout, QHBoxLayout
from .customwidgets.xtabwidget import XTabWidget
from .flights_widget import FlightsWidget
from .planets_bar_widget import PlanetSidebarWidget
from .statusbar import XNCStatusBar
from .galaxy_widget import GalaxyWidget
from .imperium_widget import ImperiumWidget
from .login_widget import LoginWidget
from .overview_widget import OverviewWidget
from .planet_widget import PlanetWidget
from .settings_widget import SettingsWidget
from .widget_utils import install_layout_for_widget, \
append_trailing_spacer_to_layout, \
remove_trailing_spacer_from_layout, \
flight_mission_for_humans
from .xnova import xn_logger
from .xnova.xn_data import XNCoords, XNFlight, XNPlanet, XNPlanetBuildingItem
from .xnova.xn_world import XNovaWorld_instance
logger = xn_logger.get(__name__, debug=True)
# This class will control:
# 1. main application window in general, and all UI (tray icon, etc)
# (although each tab will have its own widget controller)
# 2. XNova world object and background world updater thread
class XNova_MainWindow(QWidget):
STATE_NOT_AUTHED = 0
STATE_AUTHED = 1
def __init__(self, parent=None):
super(XNova_MainWindow, self).__init__(parent, Qt.Window)
# state vars
self.config_store_dir = './cache'
self.cfg = configparser.ConfigParser()
self.cfg.read('config/net.ini', encoding='utf-8')
self.state = self.STATE_NOT_AUTHED
self.login_email = ''
self.cookies_dict = {}
self._hidden_to_tray = False
#
# init UI
self.setWindowIcon(QIcon(':/i/xnova_logo_64.png'))
self.setWindowTitle('XNova Commander')
# main layouts
self._layout = QVBoxLayout()
self._layout.setContentsMargins(0, 2, 0, 0)
self._layout.setSpacing(3)
self.setLayout(self._layout)
self._horizontal_layout = QHBoxLayout()
self._horizontal_layout.setContentsMargins(0, 0, 0, 0)
self._horizontal_layout.setSpacing(6)
# flights frame
self._fr_flights = QFrame(self)
self._fr_flights.setMinimumHeight(22)
self._fr_flights.setFrameShape(QFrame.NoFrame)
self._fr_flights.setFrameShadow(QFrame.Plain)
# planets bar scrollarea
self._sa_planets = QScrollArea(self)
self._sa_planets.setMinimumWidth(125)
self._sa_planets.setMaximumWidth(125)
self._sa_planets.setFrameShape(QFrame.NoFrame)
self._sa_planets.setFrameShadow(QFrame.Plain)
self._sa_planets.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self._sa_planets.setWidgetResizable(True)
self._panel_planets = QWidget(self._sa_planets)
self._layout_pp = QVBoxLayout()
self._panel_planets.setLayout(self._layout_pp)
self._lbl_planets = QLabel(self.tr('Planets:'), self._panel_planets)
self._lbl_planets.setMaximumHeight(32)
self._layout_pp.addWidget(self._lbl_planets)
self._layout_pp.addStretch()
self._sa_planets.setWidget(self._panel_planets)
#
# tab widget
self._tabwidget = XTabWidget(self)
self._tabwidget.enableButtonAdd(False)
self._tabwidget.tabCloseRequested.connect(self.on_tab_close_requested)
self._tabwidget.addClicked.connect(self.on_tab_add_clicked)
#
# create status bar
self._statusbar = XNCStatusBar(self)
self.set_status_message(self.tr('Not connected: Log in!'))
#
# tab widget pages
self.login_widget = None
self.flights_widget = None
self.overview_widget = None
self.imperium_widget = None
#
# settings widget
self.settings_widget = SettingsWidget(self)
self.settings_widget.settings_changed.connect(self.on_settings_changed)
self.settings_widget.hide()
#
# finalize layouts
self._horizontal_layout.addWidget(self._sa_planets)
self._horizontal_layout.addWidget(self._tabwidget)
self._layout.addWidget(self._fr_flights)
self._layout.addLayout(self._horizontal_layout)
self._layout.addWidget(self._statusbar)
#
# system tray icon
self.tray_icon = None
        if 'tray' in self.cfg:
            if self.cfg['tray']['icon_usage'] in ('show', 'show_min'):
                self.create_tray_icon()
#
# try to restore last window size
ssz = self.load_cfg_val('main_size')
if ssz is not None:
self.resize(ssz[0], ssz[1])
#
# world initialization
self.world = XNovaWorld_instance()
self.world_timer = QTimer(self)
self.world_timer.timeout.connect(self.on_world_timer)
# overrides QWidget.closeEvent
# cleanup just before the window close
def closeEvent(self, close_event: QCloseEvent):
logger.debug('closing')
if self.tray_icon is not None:
self.tray_icon.hide()
self.tray_icon = None
if self.world_timer.isActive():
self.world_timer.stop()
self.world.script_command = 'stop' # also stop possible running scripts
if self.world.isRunning():
self.world.quit()
logger.debug('waiting for world thread to stop (5 sec)...')
wait_res = self.world.wait(5000)
if not wait_res:
logger.warn('wait failed, last chance, terminating!')
self.world.terminate()
# store window size
ssz = (self.width(), self.height())
self.store_cfg_val('main_size', ssz)
# accept the event
close_event.accept()
def showEvent(self, evt: QShowEvent):
super(XNova_MainWindow, self).showEvent(evt)
self._hidden_to_tray = False
def changeEvent(self, evt: QEvent):
super(XNova_MainWindow, self).changeEvent(evt)
if evt.type() == QEvent.WindowStateChange:
if not isinstance(evt, QWindowStateChangeEvent):
return
# make sure we only do this for minimize events
if (evt.oldState() != Qt.WindowMinimized) and self.isMinimized():
# we were minimized! explicitly hide settings widget
# if it is open, otherwise it will be lost forever :(
if self.settings_widget is not None:
if self.settings_widget.isVisible():
self.settings_widget.hide()
# should we minimize to tray?
if self.cfg['tray']['icon_usage'] == 'show_min':
if not self._hidden_to_tray:
self._hidden_to_tray = True
self.hide()
def create_tray_icon(self):
if QSystemTrayIcon.isSystemTrayAvailable():
logger.debug('System tray icon is available, showing')
self.tray_icon = QSystemTrayIcon(QIcon(':/i/xnova_logo_32.png'), self)
self.tray_icon.setToolTip(self.tr('XNova Commander'))
self.tray_icon.activated.connect(self.on_tray_icon_activated)
self.tray_icon.show()
else:
self.tray_icon = None
def hide_tray_icon(self):
if self.tray_icon is not None:
self.tray_icon.hide()
self.tray_icon.deleteLater()
self.tray_icon = None
def set_tray_tooltip(self, tip: str):
if self.tray_icon is not None:
self.tray_icon.setToolTip(tip)
def set_status_message(self, msg: str):
self._statusbar.set_status(msg)
def store_cfg_val(self, category: str, value):
pickle_filename = '{0}/{1}.dat'.format(self.config_store_dir, category)
try:
cache_dir = pathlib.Path(self.config_store_dir)
if not cache_dir.exists():
cache_dir.mkdir()
with open(pickle_filename, 'wb') as f:
pickle.dump(value, f)
except pickle.PickleError as pe:
pass
except IOError as ioe:
pass
def load_cfg_val(self, category: str, default_value=None):
value = None
pickle_filename = '{0}/{1}.dat'.format(self.config_store_dir, category)
try:
with open(pickle_filename, 'rb') as f:
value = pickle.load(f)
if value is None:
value = default_value
except pickle.PickleError as pe:
pass
except IOError as ioe:
pass
return value
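    # Example: the window geometry is persisted in closeEvent() as
    #   self.store_cfg_val('main_size', (self.width(), self.height()))
    # and restored in __init__() via self.load_cfg_val('main_size').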
@pyqtSlot()
def on_settings_changed(self):
self.cfg.read('config/net.ini', encoding='utf-8')
# maybe show/hide tray icon now?
show_tray_icon = False
if 'tray' in self.cfg:
icon_usage = self.cfg['tray']['icon_usage']
if (icon_usage == 'show') or (icon_usage == 'show_min'):
show_tray_icon = True
# show if needs show and hidden, or hide if shown and needs to hide
if show_tray_icon and (self.tray_icon is None):
logger.debug('settings changed, showing tray icon')
self.create_tray_icon()
elif (not show_tray_icon) and (self.tray_icon is not None):
logger.debug('settings changed, hiding tray icon')
self.hide_tray_icon()
# also notify world about changed config!
self.world.reload_config()
def add_tab(self, widget: QWidget, title: str, closeable: bool = True) -> int:
tab_index = self._tabwidget.addTab(widget, title, closeable)
return tab_index
def remove_tab(self, index: int):
self._tabwidget.removeTab(index)
# called by main application object just after main window creation
# to show login widget and begin login process
def begin_login(self):
# create flights widget
self.flights_widget = FlightsWidget(self._fr_flights)
self.flights_widget.load_ui()
install_layout_for_widget(self._fr_flights, Qt.Vertical, margins=(1, 1, 1, 1), spacing=1)
self._fr_flights.layout().addWidget(self.flights_widget)
self.flights_widget.set_online_state(False)
self.flights_widget.requestShowSettings.connect(self.on_show_settings)
# create and show login widget as first tab
self.login_widget = LoginWidget(self._tabwidget)
self.login_widget.load_ui()
self.login_widget.loginError.connect(self.on_login_error)
self.login_widget.loginOk.connect(self.on_login_ok)
self.login_widget.show()
self.add_tab(self.login_widget, self.tr('Login'), closeable=False)
# self.test_setup_planets_panel()
# self.test_planet_tab()
def setup_planets_panel(self, planets: list):
layout = self._panel_planets.layout()
layout.setSpacing(0)
remove_trailing_spacer_from_layout(layout)
# remove all previous planet widgets from planets panel
if layout.count() > 0:
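            # iterate in reverse so that removing a widget does not shift
            # the layout indices of the items not yet visited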
for i in range(layout.count()-1, -1, -1):
li = layout.itemAt(i)
if li is not None:
wi = li.widget()
if wi is not None:
if isinstance(wi, PlanetSidebarWidget):
layout.removeWidget(wi)
wi.close()
wi.deleteLater() # fix possible mem leak
del wi
for pl in planets:
pw = PlanetSidebarWidget(self._panel_planets)
pw.setPlanet(pl)
layout.addWidget(pw)
pw.show()
# connections from each planet bar widget
pw.requestOpenGalaxy.connect(self.on_request_open_galaxy_tab)
pw.requestOpenPlanet.connect(self.on_request_open_planet_tab)
append_trailing_spacer_to_layout(layout)
def update_planets_panel(self):
"""
Calls QWidget.update() on every PlanetBarWidget
embedded in ui.panel_planets, causing repaint
"""
layout = self._panel_planets.layout()
if layout.count() > 0:
for i in range(layout.count()):
li = layout.itemAt(i)
if li is not None:
wi = li.widget()
if wi is not None:
if isinstance(wi, PlanetSidebarWidget):
wi.update()
def add_tab_for_planet(self, planet: XNPlanet):
# construct planet widget and setup signals/slots
plw = PlanetWidget(self._tabwidget)
plw.requestOpenGalaxy.connect(self.on_request_open_galaxy_tab)
plw.setPlanet(planet)
# construct tab title
tab_title = '{0} {1}'.format(planet.name, planet.coords.coords_str())
# add tab and make it current
tab_index = self.add_tab(plw, tab_title, closeable=True)
self._tabwidget.setCurrentIndex(tab_index)
self._tabwidget.tabBar().setTabIcon(tab_index, QIcon(':/i/planet_32.png'))
return tab_index
def add_tab_for_galaxy(self, coords: XNCoords = None):
gw = GalaxyWidget(self._tabwidget)
tab_title = '{0}'.format(self.tr('Galaxy'))
if coords is not None:
tab_title = '{0} {1}'.format(self.tr('Galaxy'), coords.coords_str())
gw.setCoords(coords.galaxy, coords.system)
idx = self.add_tab(gw, tab_title, closeable=True)
self._tabwidget.setCurrentIndex(idx)
self._tabwidget.tabBar().setTabIcon(idx, QIcon(':/i/galaxy_32.png'))
@pyqtSlot(int)
def on_tab_close_requested(self, idx: int):
# logger.debug('tab close requested: {0}'.format(idx))
if idx <= 1: # cannot close overview or imperium tabs
return
self.remove_tab(idx)
@pyqtSlot()
def on_tab_add_clicked(self):
pos = QCursor.pos()
planets = self.world.get_planets()
# logger.debug('tab bar add clicked, cursor pos = ({0}, {1})'.format(pos.x(), pos.y()))
menu = QMenu(self)
# galaxy view
galaxy_action = QAction(menu)
galaxy_action.setText(self.tr('Add galaxy view'))
galaxy_action.setData(QVariant('galaxy'))
menu.addAction(galaxy_action)
# planets
menu.addSection(self.tr('-- Planet tabs: --'))
for planet in planets:
action = QAction(menu)
action.setText('{0} {1}'.format(planet.name, planet.coords.coords_str()))
action.setData(QVariant(planet.planet_id))
menu.addAction(action)
action_ret = menu.exec(pos)
if action_ret is not None:
# logger.debug('selected action data = {0}'.format(str(action_ret.data())))
if action_ret == galaxy_action:
logger.debug('action_ret == galaxy_action')
self.add_tab_for_galaxy()
return
# else consider this is planet widget
planet_id = int(action_ret.data())
self.on_request_open_planet_tab(planet_id)
@pyqtSlot(str)
def on_login_error(self, errstr):
logger.error('Login error: {0}'.format(errstr))
self.state = self.STATE_NOT_AUTHED
self.set_status_message(self.tr('Login error: {0}').format(errstr))
QMessageBox.critical(self, self.tr('Login error:'), errstr)
@pyqtSlot(str, dict)
def on_login_ok(self, login_email, cookies_dict):
# logger.debug('Login OK, login: {0}, cookies: {1}'.format(login_email, str(cookies_dict)))
# save login data: email, cookies
self.state = self.STATE_AUTHED
self.set_status_message(self.tr('Login OK, loading world'))
self.login_email = login_email
self.cookies_dict = cookies_dict
#
# destroy login widget and remove its tab
self.remove_tab(0)
self.login_widget.close()
self.login_widget.deleteLater()
self.login_widget = None
#
# create overview widget and add it as first tab
self.overview_widget = OverviewWidget(self._tabwidget)
self.overview_widget.load_ui()
self.add_tab(self.overview_widget, self.tr('Overview'), closeable=False)
self.overview_widget.show()
self.overview_widget.setEnabled(False)
#
# create 2nd tab - Imperium
self.imperium_widget = ImperiumWidget(self._tabwidget)
self.add_tab(self.imperium_widget, self.tr('Imperium'), closeable=False)
self.imperium_widget.setEnabled(False)
#
# initialize XNova world updater
self.world.initialize(cookies_dict)
self.world.set_login_email(self.login_email)
# connect signals from world
self.world.world_load_progress.connect(self.on_world_load_progress)
self.world.world_load_complete.connect(self.on_world_load_complete)
self.world.net_request_started.connect(self.on_net_request_started)
self.world.net_request_finished.connect(self.on_net_request_finished)
self.world.flight_arrived.connect(self.on_flight_arrived)
self.world.build_complete.connect(self.on_building_complete)
self.world.loaded_overview.connect(self.on_loaded_overview)
self.world.loaded_imperium.connect(self.on_loaded_imperium)
self.world.loaded_planet.connect(self.on_loaded_planet)
self.world.start()
@pyqtSlot(str, int)
def on_world_load_progress(self, comment: str, progress: int):
self._statusbar.set_world_load_progress(comment, progress)
@pyqtSlot()
def on_world_load_complete(self):
logger.debug('main: on_world_load_complete()')
# enable adding new tabs
self._tabwidget.enableButtonAdd(True)
# update statusbar
self._statusbar.set_world_load_progress('', -1) # turn off progress display
self.set_status_message(self.tr('World loaded.'))
# update account info
if self.overview_widget is not None:
self.overview_widget.setEnabled(True)
self.overview_widget.update_account_info()
self.overview_widget.update_builds()
# update flying fleets
self.flights_widget.set_online_state(True)
self.flights_widget.update_flights()
# update planets
planets = self.world.get_planets()
self.setup_planets_panel(planets)
if self.imperium_widget is not None:
self.imperium_widget.setEnabled(True)
self.imperium_widget.update_planets()
# update statusbar
self._statusbar.update_online_players_count()
# update tray tooltip, add account name
self.set_tray_tooltip(self.tr('XNova Commander') + ' - '
+ self.world.get_account_info().login)
# set timer to do every-second world recalculation
self.world_timer.setInterval(1000)
self.world_timer.setSingleShot(False)
self.world_timer.start()
@pyqtSlot()
def on_loaded_overview(self):
logger.debug('on_loaded_overview')
# A lot of things are updated when overview is loaded
# * Account information and stats
if self.overview_widget is not None:
self.overview_widget.update_account_info()
# * flights are updated every second in on_world_timer() anyway, so there is
#   no need to call self.flights_widget.update_flights() here
# * the messages count is likewise updated together with the flights
# * current planet may have changed
self.update_planets_panel()
# * the status bar info (online players / server time) is refreshed as well
self._statusbar.update_online_players_count()
@pyqtSlot()
def on_loaded_imperium(self):
logger.debug('on_loaded_imperium')
# need to update imperium widget
if self.imperium_widget is not None:
self.imperium_widget.update_planets()
# Note: the imperium update is the only place where the planets list is
# re-read, so the number of planets, their names, etc. may change here.
# It also OVERWRITES the whole planets array, so every previous planet
# reference held by GUI elements must be invalidated: they would otherwise
# point to stale, no-longer-used planet objects.
planets = self.world.get_planets()
# re-create planets sidebar
self.setup_planets_panel(planets)
# update all builds in overview widget
if self.overview_widget:
self.overview_widget.update_builds()
# update all planet tabs with new planet references
cnt = self._tabwidget.count()
if cnt > 2:
for index in range(2, cnt):
tab_page = self._tabwidget.tabWidget(index)
if tab_page is not None:
try:
tab_type = tab_page.get_tab_type()
if tab_type == 'planet':
tab_planet = tab_page.planet()
new_planet = self.world.get_planet(tab_planet.planet_id)
tab_page.setPlanet(new_planet)
except AttributeError: # not all pages may have method get_tab_type()
pass
@pyqtSlot(int)
def on_loaded_planet(self, planet_id: int):
logger.debug('Got signal on_loaded_planet({0}), updating overview '
'widget and planets panel'.format(planet_id))
if self.overview_widget:
self.overview_widget.update_builds()
self.update_planets_panel()
# update also planet tab, if any
planet = self.world.get_planet(planet_id)
if planet is not None:
tab_idx = self.find_tab_for_planet(planet_id)
if tab_idx != -1:
tab_widget = self._tabwidget.tabWidget(tab_idx)
if isinstance(tab_widget, PlanetWidget):
logger.debug('Updating planet tab #{}'.format(tab_idx))
tab_widget.setPlanet(planet)
@pyqtSlot()
def on_world_timer(self):
if self.world:
self.world.world_tick()
self.update_planets_panel()
if self.flights_widget:
self.flights_widget.update_flights()
if self.overview_widget:
self.overview_widget.update_builds()
if self.imperium_widget:
self.imperium_widget.update_planet_resources()
@pyqtSlot()
def on_net_request_started(self):
self._statusbar.set_loading_status(True)
@pyqtSlot()
def on_net_request_finished(self):
self._statusbar.set_loading_status(False)
@pyqtSlot(int)
def on_tray_icon_activated(self, reason):
# QSystemTrayIcon::Unknown 0 Unknown reason
# QSystemTrayIcon::Context 1 The context menu for the system tray entry was requested
# QSystemTrayIcon::DoubleClick 2 The system tray entry was double clicked
# QSystemTrayIcon::Trigger 3 The system tray entry was clicked
# QSystemTrayIcon::MiddleClick 4 The system tray entry was clicked with the middle mouse button
if reason == QSystemTrayIcon.Trigger:
# left-click
self.setWindowState((self.windowState() & ~Qt.WindowMinimized) | Qt.WindowActive)
self.show()
return
def show_tray_message(self, title, message, icon_type=None, timeout_ms=None):
"""
Shows a message from the system tray icon, if the system supports it.
If unsupported, this is a no-op.
:param title: message title
:param message: message text
:param icon_type: one of:
QSystemTrayIcon.NoIcon 0 No icon is shown.
QSystemTrayIcon.Information 1 An information icon is shown.
QSystemTrayIcon.Warning 2 A standard warning icon is shown.
QSystemTrayIcon.Critical 3 A critical warning icon is shown
"""
if self.tray_icon is None:
return
if self.tray_icon.supportsMessages():
if icon_type is None:
icon_type = QSystemTrayIcon.Information
if timeout_ms is None:
timeout_ms = 10000
self.tray_icon.showMessage(title, message, icon_type, timeout_ms)
else:
logger.info('This system does not support tray icon messages.')
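# Illustrative usage of show_tray_message (an assumption, not from the
# original file): notify with a warning icon and a 5-second timeout.
#   self.show_tray_message(self.tr('XNova'), self.tr('Fleet arrived'),
#                          QSystemTrayIcon.Warning, 5000)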
@pyqtSlot()
def on_show_settings(self):
if self.settings_widget is not None:
self.settings_widget.show()
self.settings_widget.showNormal()
@pyqtSlot(XNFlight)
def on_flight_arrived(self, fl: XNFlight):
logger.debug('main: flight arrival: {0}'.format(fl))
mis_str = flight_mission_for_humans(fl.mission)
if fl.direction == 'return':
mis_str += ' ' + self.tr('return')
short_fleet_info = self.tr('{0} {1} => {2}, {3} ship(s)').format(
mis_str, fl.src, fl.dst, len(fl.ships))
self.show_tray_message(self.tr('XNova: Fleet arrived'), short_fleet_info)
@pyqtSlot(XNPlanet, XNPlanetBuildingItem)
def on_building_complete(self, planet: XNPlanet, bitem: XNPlanetBuildingItem):
logger.debug('main: build complete: on planet {0}: {1}'.format(
planet.name, str(bitem)))
# update also planet tab, if any
if isinstance(planet, XNPlanet):
tab_idx = self.find_tab_for_planet(planet.planet_id)
if tab_idx != -1:
tab_widget = self._tabwidget.tabWidget(tab_idx)
if isinstance(tab_widget, PlanetWidget):
logger.debug('Updating planet tab #{}'.format(tab_idx))
tab_widget.setPlanet(planet)
# construct message to show in tray
if bitem.is_shipyard_item:
binfo_str = '{0} x {1}'.format(bitem.quantity, bitem.name)
else:
binfo_str = self.tr('{0} lv.{1}').format(bitem.name, bitem.level)
msg = self.tr('{0} has built {1}').format(planet.name, binfo_str)
self.show_tray_message(self.tr('XNova: Building complete'), msg)
@pyqtSlot(XNCoords)
def on_request_open_galaxy_tab(self, coords: XNCoords):
tab_index = self.find_tab_for_galaxy(coords.galaxy, coords.system)
if tab_index == -1: # create new tab for these coords
self.add_tab_for_galaxy(coords)
return
# else switch to that tab
self._tabwidget.setCurrentIndex(tab_index)
@pyqtSlot(int)
def on_request_open_planet_tab(self, planet_id: int):
tab_index = self.find_tab_for_planet(planet_id)
if tab_index == -1: # create new tab for planet
planet = self.world.get_planet(planet_id)
if planet is not None:
self.add_tab_for_planet(planet)
return
# else switch to that tab
self._tabwidget.setCurrentIndex(tab_index)
def find_tab_for_planet(self, planet_id: int) -> int:
"""
Finds tab index where specified planet is already opened
:param planet_id: planet id to search for
:return: tab index, or -1 if not found
"""
cnt = self._tabwidget.count()
if cnt < 3:
return -1 # only overview and imperium tabs are present
for index in range(2, cnt):
tab_page = self._tabwidget.tabWidget(index)
if tab_page is not None:
try:
tab_type = tab_page.get_tab_type()
if tab_type == 'planet':
tab_planet = tab_page.planet()
if tab_planet.planet_id == planet_id:
# we have found tab index where this planet is already opened
return index
except AttributeError: # not all pages may have method get_tab_type()
pass
return -1
def find_tab_for_galaxy(self, galaxy: int, system: int) -> int:
"""
Finds tab index where specified galaxy view is already opened
:param galaxy: galaxy target coordinate
:param system: system target coordinate
:return: tab index, or -1 if not found
"""
cnt = self._tabwidget.count()
if cnt < 3:
return -1 # only overview and imperium tabs are present
for index in range(2, cnt):
tab_page = self._tabwidget.tabWidget(index)
if tab_page is not None:
try:
tab_type = tab_page.get_tab_type()
if tab_type == 'galaxy':
coords = tab_page.coords()
if (coords[0] == galaxy) and (coords[1] == system):
# we have found galaxy tab index where this place is already opened
return index
except AttributeError: # not all pages may have method get_tab_type()
pass
return -1
def test_setup_planets_panel(self):
"""
Testing only - add fictitious planets to exercise the planets panel without loading data
:return: None
"""
pl1 = XNPlanet('Arnon', XNCoords(1, 7, 6))
pl1.pic_url = 'skins/default/planeten/small/s_normaltempplanet08.jpg'
pl1.fields_busy = 90
pl1.fields_total = 167
pl1.is_current = True
pl2 = XNPlanet('Safizon', XNCoords(1, 232, 7))
pl2.pic_url = 'skins/default/planeten/small/s_dschjungelplanet05.jpg'
pl2.fields_busy = 84
pl2.fields_total = 207
pl2.is_current = False
test_planets = [pl1, pl2]
self.setup_planets_panel(test_planets)
def test_planet_tab(self):
"""
Testing only - add a fictitious planet tab to exercise the UI without loading the world
:return:
"""
# construct planet
pl1 = XNPlanet('Arnon', coords=XNCoords(1, 7, 6), planet_id=12345)
pl1.pic_url = 'skins/default/planeten/small/s_normaltempplanet08.jpg'
pl1.fields_busy = 90
pl1.fields_total = 167
pl1.is_current = True
pl1.res_current.met = 10000000
pl1.res_current.cry = 50000
pl1.res_current.deit = 250000000 # 250 mil
pl1.res_per_hour.met = 60000
pl1.res_per_hour.cry = 30000
pl1.res_per_hour.deit = 15000
pl1.res_max_silos.met = 6000000
pl1.res_max_silos.cry = 3000000
pl1.res_max_silos.deit = 1000000
pl1.energy.energy_left = 10
pl1.energy.energy_total = 1962
pl1.energy.charge_percent = 92
# planet building item
bitem = XNPlanetBuildingItem()
bitem.gid = 1
bitem.name = 'Рудник металла'  # "Metal mine" (the game's UI strings are Russian)
bitem.level = 29
bitem.remove_link = ''
bitem.build_link = '?set=buildings&cmd=insert&building={0}'.format(bitem.gid)
bitem.seconds_total = 23746
bitem.cost_met = 7670042
bitem.cost_cry = 1917510
bitem.is_building_item = True
# second bitem
bitem2 = XNPlanetBuildingItem()
bitem2.gid = 2
bitem2.name = 'Рудник кристалла'  # "Crystal mine"
bitem2.level = 26
bitem2.remove_link = ''
bitem2.build_link = '?set=buildings&cmd=insert&building={0}'.format(bitem2.gid)
bitem2.seconds_total = 13746
bitem2.cost_met = 9735556
bitem2.cost_cry = 4667778
bitem2.is_building_item = True
bitem2.is_downgrade = True
bitem2.seconds_left = bitem2.seconds_total // 2
bitem2.calc_end_time()
# add bitems
pl1.buildings_items = [bitem, bitem2]
# add
self.add_tab_for_planet(pl1)
|
minlexx/xnovacmd
|
ui/main.py
|
Python
|
gpl-2.0
| 31,915
|
[
"Galaxy"
] |
10427f01383ed4f509613aaab5d0bb44e27c3e7ab21dcf0b2dc06abbc2bb2432
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_analyticsprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of AnalyticsProfile Avi RESTful Object
description:
- This module is used to configure AnalyticsProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
apdex_response_threshold:
description:
- If a client receives an http response in less than the satisfactory latency threshold, the request is considered satisfied.
- It is considered tolerated if it is not satisfied and less than tolerated latency factor multiplied by the satisfactory latency threshold.
- Greater than this number and the client's request is considered frustrated.
- Allowed values are 1-30000.
- Default value when not specified in API or module is interpreted by Avi Controller as 500.
apdex_response_tolerated_factor:
description:
- Client tolerated response latency factor.
- Client must receive a response within this factor times the satisfactory threshold (apdex_response_threshold) to be considered tolerated.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_rtt_threshold:
description:
- Satisfactory client to avi round trip time(rtt).
- Allowed values are 1-2000.
- Default value when not specified in API or module is interpreted by Avi Controller as 250.
apdex_rtt_tolerated_factor:
description:
- Tolerated client to avi round trip time(rtt) factor.
- It is a multiple of apdex_rtt_threshold.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_rum_threshold:
description:
- If a client is able to load a page in less than the satisfactory latency threshold, the pageload is considered satisfied.
- It is considered tolerated if it is greater than the satisfied threshold but less than the tolerated factor multiplied by the satisfied latency.
- Greater than this number and the client's request is considered frustrated.
- A pageload includes the time for dns lookup, download of all http objects, and page render time.
- Allowed values are 1-30000.
- Default value when not specified in API or module is interpreted by Avi Controller as 5000.
apdex_rum_tolerated_factor:
description:
- Virtual service threshold factor for tolerated page load time (plt) as multiple of apdex_rum_threshold.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_server_response_threshold:
description:
- A server http response is considered satisfied if latency is less than the satisfactory latency threshold.
- The response is considered tolerated when it is greater than satisfied but less than the tolerated latency factor * s_latency.
- Greater than this number and the server response is considered frustrated.
- Allowed values are 1-30000.
- Default value when not specified in API or module is interpreted by Avi Controller as 400.
apdex_server_response_tolerated_factor:
description:
- Server tolerated response latency factor.
- Server must respond within this factor times the satisfactory threshold (apdex_server_response_threshold) to be considered tolerated.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_server_rtt_threshold:
description:
- Satisfactory client to avi round trip time(rtt).
- Allowed values are 1-2000.
- Default value when not specified in API or module is interpreted by Avi Controller as 125.
apdex_server_rtt_tolerated_factor:
description:
- Tolerated client to avi round trip time(rtt) factor.
- It is a multiple of apdex_server_rtt_threshold.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
client_log_config:
description:
- Configure which logs are sent to the avi controller from ses and how they are processed.
client_log_syslog_config:
description:
- Configure to send logs to a remote syslog server.
- Field introduced in 17.1.1.
version_added: "2.4"
conn_lossy_ooo_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of out of order packets are received.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
conn_lossy_timeo_rexmt_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted due to timeout.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
conn_lossy_total_rexmt_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
conn_lossy_zero_win_size_event_threshold:
description:
- A client connection is considered lossy when the percentage of times a packet could not be transmitted due to tcp zero window is above this threshold.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
conn_server_lossy_ooo_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of out of order packets are received.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
conn_server_lossy_timeo_rexmt_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted due to timeout.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
conn_server_lossy_total_rexmt_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
conn_server_lossy_zero_win_size_event_threshold:
description:
- A server connection is considered lossy when the percentage of times a packet could not be transmitted due to tcp zero window is above this threshold.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
description:
description:
- User defined description for the object.
disable_se_analytics:
description:
- Disable node (service engine) level analytics for vs metrics.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
disable_server_analytics:
description:
- Disable analytics on backend servers.
- This may be desired in container environments where there are a large number of ephemeral servers.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_client_close_before_request_as_error:
description:
- Exclude client closed connection before an http request could be completed from being classified as an error.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_gs_down_as_error:
description:
- Exclude queries to gslb services that are operationally down from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_http_error_codes:
description:
- List of http status codes to be excluded from being classified as an error.
- Error connections or responses impacts health score, are included as significant logs, and may be classified as part of a dos attack.
exclude_invalid_dns_domain_as_error:
description:
- Exclude dns queries to domains outside the domains configured in the dns application profile from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_invalid_dns_query_as_error:
description:
- Exclude invalid dns queries from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_no_dns_record_as_error:
description:
- Exclude queries to domains that did not have configured services/records from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_no_valid_gs_member_as_error:
description:
- Exclude queries to gslb services that have no available members from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_persistence_change_as_error:
description:
- Exclude 'persistence server changed while load balancing' from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_server_dns_error_as_error:
description:
- Exclude server dns error response from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_server_tcp_reset_as_error:
description:
- Exclude server tcp reset from errors.
- It is common for applications like ms exchange.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_syn_retransmit_as_error:
description:
- Exclude 'server unanswered syns' from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_tcp_reset_as_error:
description:
- Exclude tcp resets by client from the list of potential errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_unsupported_dns_query_as_error:
description:
- Exclude unsupported dns queries from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
hs_event_throttle_window:
description:
- Time window (in secs) within which only unique health change events should occur.
- Default value when not specified in API or module is interpreted by Avi Controller as 1209600.
hs_max_anomaly_penalty:
description:
- Maximum penalty that may be deducted from health score for anomalies.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
hs_max_resources_penalty:
description:
- Maximum penalty that may be deducted from health score for high resource utilization.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 25.
hs_max_security_penalty:
description:
- Maximum penalty that may be deducted from health score based on security assessment.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
hs_min_dos_rate:
description:
- Dos connection rate below which the dos security assessment will not kick in.
- Default value when not specified in API or module is interpreted by Avi Controller as 1000.
hs_performance_boost:
description:
- Adds free performance score credits to health score.
- It can be used for compensating health score for known slow applications.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
hs_pscore_traffic_threshold_l4_client:
description:
- Threshold number of connections in 5min, below which apdexr, apdexc, rum_apdex, and other network quality metrics are not computed.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.0.
hs_pscore_traffic_threshold_l4_server:
description:
- Threshold number of connections in 5min, below which apdexr, apdexc, rum_apdex, and other network quality metrics are not computed.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.0.
hs_security_certscore_expired:
description:
- Score assigned when the certificate has expired.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_certscore_gt30d:
description:
- Score assigned when the certificate expires in more than 30 days.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_certscore_le07d:
description:
- Score assigned when the certificate expires in less than or equal to 7 days.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.0.
hs_security_certscore_le30d:
description:
- Score assigned when the certificate expires in less than or equal to 30 days.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
hs_security_chain_invalidity_penalty:
description:
- Penalty for allowing certificates with invalid chain.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_cipherscore_eq000b:
description:
- Score assigned when the minimum cipher strength is 0 bits.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_cipherscore_ge128b:
description:
- Score assigned when the minimum cipher strength is greater than equal to 128 bits.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_cipherscore_lt128b:
description:
- Score assigned when the minimum cipher strength is less than 128 bits.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.5.
hs_security_encalgo_score_none:
description:
- Score assigned when no algorithm is used for encryption.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_encalgo_score_rc4:
description:
- Score assigned when rc4 algorithm is used for encryption.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.5.
hs_security_hsts_penalty:
description:
- Penalty for not enabling hsts.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_nonpfs_penalty:
description:
- Penalty for allowing non-pfs handshakes.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_selfsignedcert_penalty:
description:
- Deprecated.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_ssl30_score:
description:
- Score assigned when supporting ssl3.0 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.5.
hs_security_tls10_score:
description:
- Score assigned when supporting tls1.0 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_tls11_score:
description:
- Score assigned when supporting tls1.1 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_tls12_score:
description:
- Score assigned when supporting tls1.2 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_weak_signature_algo_penalty:
description:
- Penalty for allowing weak signature algorithm(s).
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
name:
description:
- The name of the analytics profile.
required: true
ranges:
description:
- List of http status code ranges to be excluded from being classified as an error.
resp_code_block:
description:
- Block of http response codes to be excluded from being classified as an error.
- Enum options - AP_HTTP_RSP_4XX, AP_HTTP_RSP_5XX.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the analytics profile.
extends_documentation_fragment:
- avi
'''
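# Worked example of the apdex thresholds documented above (illustrative note,
# not part of the module): with apdex_response_threshold=500 and
# apdex_response_tolerated_factor=4.0, a response under 500 ms is satisfied,
# one between 500 ms and 2000 ms (4.0 * 500) is tolerated, and anything
# slower is frustrated. A minimal sketch of that classification:
#
#   def apdex_class(latency_ms, threshold=500, factor=4.0):
#       if latency_ms < threshold:
#           return 'satisfied'
#       if latency_ms < factor * threshold:
#           return 'tolerated'
#       return 'frustrated'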
EXAMPLES = '''
- name: Create a custom Analytics profile object
avi_analyticsprofile:
controller: ''
username: ''
password: ''
apdex_response_threshold: 500
apdex_response_tolerated_factor: 4.0
apdex_rtt_threshold: 250
apdex_rtt_tolerated_factor: 4.0
apdex_rum_threshold: 5000
apdex_rum_tolerated_factor: 4.0
apdex_server_response_threshold: 400
apdex_server_response_tolerated_factor: 4.0
apdex_server_rtt_threshold: 125
apdex_server_rtt_tolerated_factor: 4.0
conn_lossy_ooo_threshold: 50
conn_lossy_timeo_rexmt_threshold: 20
conn_lossy_total_rexmt_threshold: 50
conn_lossy_zero_win_size_event_threshold: 2
conn_server_lossy_ooo_threshold: 50
conn_server_lossy_timeo_rexmt_threshold: 20
conn_server_lossy_total_rexmt_threshold: 50
conn_server_lossy_zero_win_size_event_threshold: 2
disable_se_analytics: false
disable_server_analytics: false
exclude_client_close_before_request_as_error: false
exclude_persistence_change_as_error: false
exclude_server_tcp_reset_as_error: false
exclude_syn_retransmit_as_error: false
exclude_tcp_reset_as_error: false
hs_event_throttle_window: 1209600
hs_max_anomaly_penalty: 10
hs_max_resources_penalty: 25
hs_max_security_penalty: 100
hs_min_dos_rate: 1000
hs_performance_boost: 20
hs_pscore_traffic_threshold_l4_client: 10.0
hs_pscore_traffic_threshold_l4_server: 10.0
hs_security_certscore_expired: 0.0
hs_security_certscore_gt30d: 5.0
hs_security_certscore_le07d: 2.0
hs_security_certscore_le30d: 4.0
hs_security_chain_invalidity_penalty: 1.0
hs_security_cipherscore_eq000b: 0.0
hs_security_cipherscore_ge128b: 5.0
hs_security_cipherscore_lt128b: 3.5
hs_security_encalgo_score_none: 0.0
hs_security_encalgo_score_rc4: 2.5
hs_security_hsts_penalty: 0.0
hs_security_nonpfs_penalty: 1.0
hs_security_selfsignedcert_penalty: 1.0
hs_security_ssl30_score: 3.5
hs_security_tls10_score: 5.0
hs_security_tls11_score: 5.0
hs_security_tls12_score: 5.0
hs_security_weak_signature_algo_penalty: 1.0
name: jason-analytics-profile
tenant_ref: Demo
'''
RETURN = '''
obj:
description: AnalyticsProfile (api/analyticsprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
apdex_response_threshold=dict(type='int',),
apdex_response_tolerated_factor=dict(type='float',),
apdex_rtt_threshold=dict(type='int',),
apdex_rtt_tolerated_factor=dict(type='float',),
apdex_rum_threshold=dict(type='int',),
apdex_rum_tolerated_factor=dict(type='float',),
apdex_server_response_threshold=dict(type='int',),
apdex_server_response_tolerated_factor=dict(type='float',),
apdex_server_rtt_threshold=dict(type='int',),
apdex_server_rtt_tolerated_factor=dict(type='float',),
client_log_config=dict(type='dict',),
client_log_syslog_config=dict(type='dict',),
conn_lossy_ooo_threshold=dict(type='int',),
conn_lossy_timeo_rexmt_threshold=dict(type='int',),
conn_lossy_total_rexmt_threshold=dict(type='int',),
conn_lossy_zero_win_size_event_threshold=dict(type='int',),
conn_server_lossy_ooo_threshold=dict(type='int',),
conn_server_lossy_timeo_rexmt_threshold=dict(type='int',),
conn_server_lossy_total_rexmt_threshold=dict(type='int',),
conn_server_lossy_zero_win_size_event_threshold=dict(type='int',),
description=dict(type='str',),
disable_se_analytics=dict(type='bool',),
disable_server_analytics=dict(type='bool',),
exclude_client_close_before_request_as_error=dict(type='bool',),
exclude_gs_down_as_error=dict(type='bool',),
exclude_http_error_codes=dict(type='list',),
exclude_invalid_dns_domain_as_error=dict(type='bool',),
exclude_invalid_dns_query_as_error=dict(type='bool',),
exclude_no_dns_record_as_error=dict(type='bool',),
exclude_no_valid_gs_member_as_error=dict(type='bool',),
exclude_persistence_change_as_error=dict(type='bool',),
exclude_server_dns_error_as_error=dict(type='bool',),
exclude_server_tcp_reset_as_error=dict(type='bool',),
exclude_syn_retransmit_as_error=dict(type='bool',),
exclude_tcp_reset_as_error=dict(type='bool',),
exclude_unsupported_dns_query_as_error=dict(type='bool',),
hs_event_throttle_window=dict(type='int',),
hs_max_anomaly_penalty=dict(type='int',),
hs_max_resources_penalty=dict(type='int',),
hs_max_security_penalty=dict(type='int',),
hs_min_dos_rate=dict(type='int',),
hs_performance_boost=dict(type='int',),
hs_pscore_traffic_threshold_l4_client=dict(type='float',),
hs_pscore_traffic_threshold_l4_server=dict(type='float',),
hs_security_certscore_expired=dict(type='float',),
hs_security_certscore_gt30d=dict(type='float',),
hs_security_certscore_le07d=dict(type='float',),
hs_security_certscore_le30d=dict(type='float',),
hs_security_chain_invalidity_penalty=dict(type='float',),
hs_security_cipherscore_eq000b=dict(type='float',),
hs_security_cipherscore_ge128b=dict(type='float',),
hs_security_cipherscore_lt128b=dict(type='float',),
hs_security_encalgo_score_none=dict(type='float',),
hs_security_encalgo_score_rc4=dict(type='float',),
hs_security_hsts_penalty=dict(type='float',),
hs_security_nonpfs_penalty=dict(type='float',),
hs_security_selfsignedcert_penalty=dict(type='float',),
hs_security_ssl30_score=dict(type='float',),
hs_security_tls10_score=dict(type='float',),
hs_security_tls11_score=dict(type='float',),
hs_security_tls12_score=dict(type='float',),
hs_security_weak_signature_algo_penalty=dict(type='float',),
name=dict(type='str', required=True),
ranges=dict(type='list',),
resp_code_block=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'analyticsprofile',
set([]))
if __name__ == '__main__':
main()
|
dav1x/ansible
|
lib/ansible/modules/network/avi/avi_analyticsprofile.py
|
Python
|
gpl-3.0
| 27,868
|
[
"VisIt"
] |
11fcb14a5545f8cf99b37e47081dce84397fedf50a2f80f48acfd62a5108255b
|
# ./gen_discretelinearity_init.py
# script generating NN initialization for training with TNet
#
# author: Karel Vesely
# calling example:
# python gen_discretelinearity_init.py --dim=<d1:d2:d3> --inst=<i1:i2>
#
import math, random
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--dim', dest='dim', help='d1:d2:d3 layer dimensions in the network')
parser.add_option('--inst', dest='inst', help='i1:i2 number of weight instances per layer')
parser.add_option('--gauss', dest='gauss', help='use gaussian noise for weights', action='store_true', default=False)
parser.add_option('--negbias', dest='negbias', help='use uniform [-4.1,-3.9] for bias (default all 0.0)', action='store_true', default=False)
parser.add_option('--linBNdim', dest='linBNdim', help='dim of linear bottleneck (sigmoids will be omitted, bias will be zero)',default=0)
(options, args) = parser.parse_args()
if options.dim is None or options.inst is None:
parser.print_help()
sys.exit(1)
dimStrL = options.dim.split(':')
dimL = []
for i in range(len(dimStrL)):
dimL.append(int(dimStrL[i]))
instStrL = options.inst.split(':')
instL = []
for i in range(len(instStrL)):
instL.append(int(instStrL[i]))
# check that each layer dimension is divisible by the layer's instance count
assert(len(dimL) == len(instL)+1)
for i in range(len(instL)):
assert(dimL[i] % instL[i] == 0)
assert(dimL[i+1] % instL[i] == 0)
for layer in range(len(dimL)-1):
print '<discretelinearity>', dimL[layer+1], dimL[layer]
print instL[layer]
for inst in range(instL[layer]):
print 'm', dimL[layer+1]/instL[layer], dimL[layer]/instL[layer]
for row in range(dimL[layer+1]/instL[layer]):
for col in range(dimL[layer]/instL[layer]):
if(options.gauss):
print 0.1*random.gauss(0.0,1.0),
else:
print random.random()/5.0-0.1,
print
print 'v', dimL[layer+1]#/instL[layer]
for idx in range(dimL[layer+1]): #/instL[layer]):
if(int(options.linBNdim) == dimL[layer+1]):
print '0.0',
elif(options.negbias):
print random.random()/5.0-4.1,
else:
print '0.0',
print
if(int(options.linBNdim) != dimL[layer+1]):
print '<sigmoid>', dimL[layer+1], dimL[layer+1]
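# Illustrative run (an assumption derived from the option spec above; Python 2):
#   python gen_discretelinearity_init.py --dim=6:4 --inst=2
# prints a "<discretelinearity> 4 6" header, the instance count "2", two
# weight blocks "m 2 3" of 2x3 random values each, a bias row "v 4" with
# four zeros, and finally "<sigmoid> 4 4" (since --linBNdim was left at 0).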
|
troylee/nnet-asr
|
tools/init/gen_discretelinearity_init.py
|
Python
|
apache-2.0
| 2,289
|
[
"Gaussian"
] |
9baf79cbcce03b538f73f387a291ca7df89effeff77776479eb55dcaefa223bd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2016-04-11
# @Filename: rss.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: José Sánchez-Gallego (gallegoj@uw.edu)
# @Last modified time: 2018-08-04 14:06:38
from __future__ import division, print_function
import io
import os
import sys
import numpy
from brain.core.exceptions import BrainError
from flask import jsonify
from flask_classful import route
from sdss_access.path import Path
import marvin
from marvin.api.base import BaseView
from marvin.api.base import arg_validate as av
from marvin.core.exceptions import MarvinError
from marvin.utils.general import mangaid2plateifu, parseIdentifier
def _getRSS(name, use_file=True, release=None, **kwargs):
"""Retrieves a RSS Marvin object."""
drpver, __ = marvin.config.lookUpVersions(release)
rss = None
results = {}
# parse name into either mangaid or plateifu
try:
idtype = parseIdentifier(name)
except Exception as ee:
results['error'] = 'Failed to parse input name {0}: {1}'.format(name, str(ee))
return rss, results
filename = None
plateifu = None
mangaid = None
try:
if use_file:
if idtype == 'mangaid':
plate, ifu = mangaid2plateifu(name, drpver=drpver)
elif idtype == 'plateifu':
plate, ifu = name.split('-')
if Path is not None:
filename = Path().full('mangarss', ifu=ifu, plate=plate, drpver=drpver)
assert os.path.exists(filename), 'file not found.'
else:
raise MarvinError('cannot create path for MaNGA rss.')
else:
if idtype == 'plateifu':
plateifu = name
elif idtype == 'mangaid':
mangaid = name
else:
raise MarvinError('invalid plateifu or mangaid: {0}'.format(idtype))
rss = marvin.tools.RSS(filename=filename, mangaid=mangaid, plateifu=plateifu,
mode='local', release=release)
results['status'] = 1
except Exception as ee:
results['error'] = 'Failed to retrieve RSS {0}: {1}'.format(name, str(ee))
return rss, results
class RSSView(BaseView):
"""Class describing API calls related to RSS files."""
route_base = '/rss/'
@route('/<name>/', methods=['GET', 'POST'], endpoint='getRSS')
@av.check_args()
def get(self, args, name):
"""This method performs a get request at the url route /rss/<id>.
.. :quickref: RSS; Get an RSS given a plate-ifu or mangaid
:param name: The name of the cube as plate-ifu or mangaid
:form release: the release of MaNGA
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
:resjson json utahconfig: json of outgoing configuration
:resjson string traceback: traceback of an error, null if None
:resjson json data: dictionary of returned data
:json string empty: the data dict is empty
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin/api/rss/8485-1901/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"data": {}
}
"""
# Pop any args we don't want going into Rss
args = self._pop_args(args, arglist='name')
rss, res = _getRSS(name, **args)
self.update_results(res)
if rss:
try:
nsa_data = rss.nsa
except (MarvinError, BrainError):
nsa_data = None
wavelength = (rss._wavelength.tolist() if isinstance(rss._wavelength, numpy.ndarray)
else rss._wavelength)
obsinfo = io.StringIO() if sys.version_info.major >= 3 else io.BytesIO()
rss.obsinfo.write(format='ascii', filename=obsinfo)
obsinfo.seek(0)
self.results['data'] = {'plateifu': name,
'mangaid': rss.mangaid,
'ra': rss.ra,
'dec': rss.dec,
'header': rss.header.tostring(),
'redshift': nsa_data.z if nsa_data else -9999,
'wavelength': wavelength,
'wcs_header': rss.wcs.to_header_string(),
'nfibers': rss._nfibers,
'obsinfo': obsinfo.read()}
return jsonify(self.results)
@route('/<name>/fibers/<fiberid>', methods=['GET', 'POST'], endpoint='getRSSFiber')
@av.check_args()
def getFiber(self, args, name, fiberid):
"""Returns a list of all the RSS arrays for a given fibre.
.. :quickref: RSS; Get a list of all the RSS arrays for a given fibre.
:param name: The name of the cube as plate-ifu or mangaid
:param fiberid: The fiberid of the fibre to retrieve.
:form release: the release of MaNGA
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
:resjson json utahconfig: json of outgoing configuration
:resjson string traceback: traceback of an error, null if None
:resjson json data: dictionary of returned data
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin/api/rss/8485-1901/fibers/15 HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"data": {"flux": [1., 2., 3., ...]
"wavelength": [3621.6, 3622.43, 3623.26, ...],
"ivar: ...,
"mask: ...,
"dispersion": ...
...
}
}
"""
# Pop any args we don't want going into Rss
args = self._pop_args(args, arglist='name')
rss, res = _getRSS(name, **args)
self.update_results(res)
if rss:
self.results['data'] = {}
for ext in rss.data:
if ext.data is None or ext.name == 'OBSINFO':
continue
if ext.data.ndim == 2:
self.results['data'][ext.name] = ext.data[int(fiberid), :].tolist()
else:
self.results['data'][ext.name] = ext.data.tolist()
return jsonify(self.results)
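# Illustrative client-side call against the endpoint documented above (host
# and release come from the docstring example; the https scheme and the
# requests dependency are assumptions, not used by this module):
#
#   import requests
#   resp = requests.get('https://api.sdss.org/marvin/api/rss/8485-1901/',
#                       params={'release': 'MPL-5'})
#   data = resp.json()['data']  # keys include plateifu, mangaid, ra, dec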
|
albireox/marvin
|
python/marvin/api/rss.py
|
Python
|
bsd-3-clause
| 7,763
|
[
"Brian"
] |
b1e6b419b1d0860fa1b6024af0862c7fa4cfc58a6455ca1dddb615dc62ed4596
|
# -*- coding: UTF-8 -*-
# Copyright 2017-2020 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""General demo data for Lino Avanti.
- Course providers and courses
"""
# from django.conf import settings
# from lino.utils import mti
from lino.utils import Cycler # join_words
from lino.utils.mldbc import babel_named as named
from lino.api import rt, dd, _
from lino.modlib.users.choicelists import UserTypes
from lino_xl.lib.cal.choicelists import Recurrencies
from lino_xl.lib.courses.choicelists import EnrolmentStates
# The demo stage names below are German: Dispens = exemption, Eingeschrieben =
# enrolled, Abgeschlossen = completed, Abgebrochen = aborted, Ausgeschlossen =
# excluded.
course_stages = [
_("Dispens"),
_("Eingeschrieben"),
_("Abgeschlossen"),
_("Abgebrochen"),
_("Ausgeschlossen")]
trends_config = []
trends_config.append((
_("Info Integration"),
[ "!Erstgespräch",
"Sprachtest",
"Einschreibung in Sprachkurs",
"Einschreibung in Integrationskurs",
"!Bilanzgespräch"]))
trends_config.append((_("Alphabetisation"), course_stages))
trends_config.append((_("A1"), course_stages))
trends_config.append((_("A2"), course_stages))
trends_config.append((_("Citizen course"), course_stages))
trends_config.append((_("Professional integration"), [
"Begleitet vom DSBE",
"Begleitet vom ADG",
"Erwerbstätigkeit",
]))
def objects():
Line = rt.models.courses.Line
Teacher = dd.plugins.courses.teacher_model
Course = rt.models.courses.Course
Topic = rt.models.courses.Topic
Enrolment = rt.models.courses.Enrolment
CourseStates = rt.models.courses.CourseStates
User = rt.models.users.User
EventType = rt.models.cal.EventType
Guest = rt.models.cal.Guest
GuestRole = rt.models.cal.GuestRole
GuestStates = rt.models.cal.GuestStates
EntryStates = rt.models.cal.EntryStates
Event = rt.models.cal.Event
Person = rt.models.contacts.Person
CommentType = rt.models.comments.CommentType
TrendStage = rt.models.trends.TrendStage
TrendArea = rt.models.trends.TrendArea
for area, stages in trends_config:
ta = named(TrendArea, area)
yield ta
for stage in stages:
kw = dict(trend_area=ta)
if stage[0] == "!":
stage = stage[1:]
kw.update(subject_column=True)
yield named(TrendStage, stage, **kw)
yield EventType(**dd.str2kw('name', _("First contact")))
kw = dd.str2kw('name', _("Lesson"))
kw.update(dd.str2kw('event_label', _("Lesson")))
event_type = EventType(**kw)
yield event_type
pupil = named(GuestRole, _("Pupil"))
yield pupil
yield named(GuestRole, _("Assistant"))
topic_citizen = named(Topic, _("Citizen course"))
yield topic_citizen
topic_lang = named(Topic, _("Language courses"))
yield topic_lang
kw = dict(event_type=event_type, guest_role=pupil)
kw.update(topic=topic_citizen)
yield named(Line, _("Citizen course"), **kw)
kw.update(topic=topic_lang)
alpha = named(Line, _("Alphabetisation"), **kw)
yield alpha
yield named(Line, _("German for beginners"), **kw)
yield named(Line, _("German A1+"), **kw)
yield named(Line, _("German A2"), **kw)
yield named(Line, _("German A2 (women)"), **kw)
yield named(CommentType, _("Phone call"))
yield named(CommentType, _("Visit"))
yield named(CommentType, _("Individual consultation"))
yield named(CommentType, _("Internal meeting"))
yield named(CommentType, _("Meeting with partners"))
laura = Teacher(first_name="Laura", last_name="Lieblig")
yield laura
yield User(username="laura", user_type=UserTypes.teacher,
partner=laura)
yield User(username="nathalie", user_type=UserTypes.user)
yield User(username="nelly", user_type=UserTypes.user)
yield User(username="audrey", user_type=UserTypes.auditor)
yield User(username="martina", user_type=UserTypes.coordinator)
yield User(username="sandra", user_type=UserTypes.secretary)
USERS = Cycler(User.objects.exclude(
user_type__in=(UserTypes.auditor, UserTypes.admin)))
kw = dict(monday=True, tuesday=True, thursday=True, friday=True)
kw.update(
line=alpha,
start_date=dd.demo_date(-30),
start_time="9:00", end_time="12:00",
max_date=dd.demo_date(10),
state=CourseStates.active,
every_unit=Recurrencies.daily,
user=USERS.pop(),
teacher=laura,
max_places=5)
yield Course(**kw)
kw.update(start_time="14:00", end_time="17:00", user=USERS.pop(),
max_places=15)
yield Course(**kw)
kw.update(start_time="18:00", end_time="20:00", user=USERS.pop(),
max_places=15)
yield Course(**kw)
PUPILS = Cycler(dd.plugins.courses.pupil_model.objects.all())
# print(20170302, dd.plugins.courses.pupil_model.objects.all())
COURSES = Cycler(Course.objects.all())
STATES = Cycler(EnrolmentStates.objects())
def fits(course, pupil):
if course.max_places and course.get_free_places() == 0:
return False
if Enrolment.objects.filter(course=course, pupil=pupil).count():
return False
return True
def enrol(pupil):
course = COURSES.pop()
if fits(course, pupil):
kw = dict(user=USERS.pop(), course=course, pupil=pupil)
kw.update(request_date=dd.demo_date(-i))
kw.update(state=STATES.pop())
return Enrolment(**kw)
for i, p in enumerate(
dd.plugins.courses.pupil_model.objects.order_by('id')):
yield enrol(p)
if i % 2 == 0:
yield enrol(p)
if i % 3 == 0:
yield enrol(p)
ar = rt.login('robin')
for obj in Course.objects.all():
obj.update_auto_events(ar)
# Suggested calendar entries older than 7 days should be marked as
# either took_place or cancelled.
qs = Event.objects.filter(
start_date__lte=dd.demo_date(-7),
state=EntryStates.suggested)
for i, obj in enumerate(qs):
if i % 9:
obj.state = EntryStates.took_place
else:
obj.state = EntryStates.cancelled
obj.full_clean()
obj.save()
# participants of events which took place should be marked as
# either absent or present or excused:
qs = Guest.objects.filter(
event__start_date__lte=dd.demo_date(-7),
event__state=EntryStates.took_place).order_by('id')
STATES = Cycler(GuestStates.get_list_items())
for i, obj in enumerate(qs):
obj.state = STATES.pop()
# if i % 8:
# obj.state = GuestStates.present
# elif i % 3:
# obj.state = GuestStates.missing
# else:
# obj.state = GuestStates.excused
obj.full_clean()
obj.save()
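# Note on Cycler, used throughout this fixture (an illustrative sketch of
# lino.utils.Cycler semantics as relied on above): pop() cycles endlessly
# over the wrapped sequence, which is why USERS.pop() and STATES.pop() may
# be called more often than there are items.
#
#   from lino.utils import Cycler
#   c = Cycler(['a', 'b'])
#   [c.pop() for _ in range(5)]  # -> ['a', 'b', 'a', 'b', 'a']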
|
lino-framework/book
|
lino_book/projects/avanti1/fixtures/demo.py
|
Python
|
bsd-2-clause
| 6,751
|
[
"VisIt"
] |
bfe6ab5d0ecdb406bc3772ae0aa69bce08352e11b8129527277b93f5a794103a
|
#!/usr/bin/python3
# direct translation of extract_wb in python using as few external deps as possible
from __future__ import print_function
import sys
from sys import argv
import os
import xml.etree.ElementTree as ET
import subprocess
from subprocess import PIPE
import shlex
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
if len(argv) < 2 :
sys.exit("Usage: extract_wb <file1> [file2] ...")
IGNORED_PRESETS = {"Auto", "Kelvin", "Measured", "AsShot", "As Shot", "Preset",
"Natural Auto", "Multi Auto", "Color Temperature Enhancement",
"One Touch WB 1", "One Touch WB 2", "One Touch WB 3",
"One Touch WB 4", "Custom WB 1", "Auto0", "Auto1", "Auto2",
"Custom", "CWB1", "CWB2", "CWB3", "CWB4", "Black",
"Illuminator1", "Illuminator2", "Uncorrected"}
FL_PRESET_REPLACE = {
"Fluorescent" : "CoolWhiteFluorescent",
"FluorescentP1" : "DayWhiteFluorescent",
"FluorescentP2" : "DaylightFluorescent",
"FluorescentM1" : "WarmWhiteFluorescent",
"FluorescentD" : "DaylightFluorescent",
"FluorescentN" : "NeutralFluorescent",
"FluorescentW" : "WhiteFluorescent",
"Daylight Fluorescent" : "DaylightFluorescent",
"Day White Fluorescent" : "DayWhiteFluorescent",
"White Fluorescent" : "WhiteFluorescent",
"Unknown (0x600)" : "Underwater",
"Sunny" : "DirectSunlight",
"Fine Weather" : "DirectSunlight",
"Tungsten (Incandescent)" : "Tungsten",
"ISO Studio Tungsten" : "Tungsten",
"Cool WHT FL" : "CoolWhiteFluorescent",
"Daylight FL" : "DaylightFluorescent",
"Warm WHT FL" : "WarmWhiteFluorescent",
"Warm White Fluorescent" : "WarmWhiteFluorescent",
"White FL" : "WhiteFluorescent",
"Mercury Lamp" : "HighTempMercuryVaporFluorescent",
"Day White FL" : "DayWhiteFluorescent",
"Sodium Lamp" : "SodiumVaporFluorescent",
"3000K (Tungsten light)" : "Tungsten",
"4000K (Cool white fluorescent)" : "CoolWhiteFluorescent",
"5300K (Fine Weather)" : "Daylight",
"5500K (Flash)" : "Flash",
"6000K (Cloudy)" : "Cloudy",
"7500K (Fine Weather with Shade)" : "Shade",
}
PRESET_ORDER = ["DirectSunlight", "Daylight", "D55", "Shade","Cloudy",
"Tungsten", "Incandescent","Fluorescent",
"WarmWhiteFluorescent", "CoolWhiteFluorescent",
"DayWhiteFluorescent","DaylightFluorescent",
"DaylightFluorescent", "NeutralFluorescent", "WhiteFluorescent",
"HighTempMercuryVaporFluorescent", "HTMercury",
"SodiumVaporFluorescent", "Underwater", "Flash", "Unknown"]
PRESET_SORT_MAPPING = {}
for index,name in enumerate(PRESET_ORDER):
PRESET_SORT_MAPPING[name] = index + 1
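# e.g. PRESET_SORT_MAPPING['DirectSunlight'] == 1 and the last entry,
# 'Unknown', maps to len(PRESET_ORDER); preset_to_sort() below uses these
# indices so presets sort in PRESET_ORDER order.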
cams_from_source = os.path.dirname(os.path.abspath(__file__)) + "/../src/external/rawspeed/data/cameras.xml"
cams_from_dist = os.path.dirname(os.path.abspath(__file__)) + "/../rawspeed/cameras.xml"
CAMERAS = os.path.abspath(cams_from_source) if os.path.exists(os.path.abspath(cams_from_source)) else os.path.abspath(cams_from_dist)
if not os.path.exists(CAMERAS):
sys.exit("Can't find cameras mapping file, should be in {0}".format(CAMERAS))
exif_name_map = {}
xml_doc = ET.parse(CAMERAS)
for camera in xml_doc.getroot().findall('Camera'):
maker = exif_maker = camera.get('make')
model = exif_model = camera.get('model')
exif_id = maker,model
if camera.find('ID') is not None:
cid = camera.find('ID')
maker = cid.get('make')
model = cid.get('model')
exif_name_map[exif_id] = maker,model
for alias in camera.findall('Aliases/Alias'):
exif_model = alias.text
exif_id = exif_maker, exif_model
exif_name_map[exif_id] = maker,model
found_presets = []
for filename in argv[1:]:
red = green = blue = maker = model = preset = None
finetune = fl_count = rlevel = blevel = glevel = 0
listed_presets = []
preset_names = {}
gm_skew = False
command = "exiftool -Make -Model \"-WBType*\" \"-WB_*\" \"-ColorTemp*\" "\
"-WhiteBalance -WhiteBalance2 -WhitePoint -ColorCompensationFilter "\
"-WBShiftAB -WBShiftAB_GM -WBShiftAB_GM_Precise -WBShiftGM -WBScale "\
"-WhiteBalanceFineTune -WhiteBalanceComp -WhiteBalanceSetting "\
"-WhiteBalanceBracket -WhiteBalanceBias -WBMode -WhiteBalanceMode "\
"-WhiteBalanceTemperature -WhiteBalanceDetected -ColorTemperature "\
"-WBShiftIntelligentAuto -WBShiftCreativeControl -WhiteBalanceSetup "\
"-WBRedLevel -WBBlueLevel -WBGreenLevel -RedBalance -BlueBalance "\
"\"{0}\"".format(filename)
if filename.endswith(('.txt','.TXT')):
command = 'cat "{0}"'.format(filename)
command = shlex.split(command)
proc = subprocess.check_output(command, universal_newlines=True)
for io in proc.splitlines():
lineparts = io.split(':')
tag = lineparts[0].strip()
values = lineparts[1].strip().split(' ')
if 'Make' in tag.split():
maker = lineparts[1].strip()
elif 'Model' in tag.split():
model = lineparts[1].strip()
elif tag == "WB RGGB Levels":
green = (float(values[1])+float(values[2]))/2.0
red = float(values[0])/green
blue = float(values[3])/green
green = 1
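# worked example (illustrative): RGGB levels "2024 1024 1024 1560" give
# green = (1024+1024)/2, so red = 2024/1024 ~= 1.977 and blue = 1560/1024 ~= 1.523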
elif tag == "WB RB Levels":
red = float(values[0])
blue = float(values[1])
if len(values) == 4 and values[2] == "256" and values[3] == "256":
red /= 256.0
blue /= 256.0
green = 1
elif tag == "WB GRB Levels":
green = float(values[0])
red = float(values[1])/green
blue = float(values[2])/green
green = 1
# elif tag == "WB GRB Levels Auto" and maker == "FUJIFILM" # fuji seems to use "WB GRB Levels Auto to describe manual finetuning
# green = float(values[0])
# red = float(values[1])/green
# blue = float(values[2])/green
# green = 1
elif tag == "White Point" and len(values) > 3:
green = (float(values[1])+float(values[2]))/2.0
red = float(values[0])/green
blue = float(values[3])/green
green = 1
elif tag == "White Balance" or tag == "White Balance 2":
preset = ' '.join(values)
if preset in FL_PRESET_REPLACE:
preset = FL_PRESET_REPLACE[preset]
elif ' '.join(tag.split()[:2]) == "WB Type":
preset_names[' '.join(tag.split()[2:])] = ' '.join(values)
elif ' '.join(tag.split()[:3]) in ['WB RGB Levels', 'WB RGGB Levels', 'WB RB Levels']:
# todo - this codepath is weird
p = ''.join(tag.split()[3:])
if p in preset_names:
p = preset_names[p]
r=g=b=0
if len(values) == 4 and ' '.join(tag.split()[:3]) in ['WB RB Levels']:
g = (float(values[2])+float(values[3]))/2.0
r = float(values[0])/g
b = float(values[1])/g
g = 1
elif len(values) == 4:
g = (float(values[1])+float(values[2]))/2.0
r = float(values[0])/g
b = float(values[3])/g
g = 1
elif len(values) == 3:
g = float(values[1])
r = float(values[0])/g
b = float(values[2])/g
g = 1
elif len(values) == 2 and ' '.join(tag.split()[:3]) in ['WB RB Levels']:
r = float(values[0])
b = float(values[1])
g = 1
else:
eprint("Found RGB tag '{0}' with {1} values instead of 2, 3 or 4".format(p, len(values)))
if 'Fluorescent' in p:
fl_count += 1
if not p:
p = 'Unknown'
if p not in IGNORED_PRESETS:
listed_presets.append(tuple([p,r,g,b]))
elif tag == "WB Red Level":
rlevel = float(values[0])
elif tag == "WB Blue Level":
blevel = float(values[0])
elif tag == "WB Green Level":
glevel = float(values[0])
elif tag == "WB Shift AB": # canon - positive is towards amber, panasonic/leica/pentax - positive is towards blue?
finetune = values[0]
elif tag == "WB Shift GM": # detect GM shift and warn about it
gm_skew = gm_skew or (int(values[0]) != 0)
elif tag == "WB Shift AB GM": # Sony
finetune = values[0]
gm_skew = gm_skew or (int(values[1]) != 0)
elif tag == "WB Shift AB GM Precise" and maker.startswith("SONY"): # Sony
finetune = int(float(values[0]) * 2.0)
gm_skew = gm_skew or (float(values[1]) != 0.0)
elif tag == "White Balance Fine Tune" and maker.startswith("NIKON"): # nikon
finetune = -(int(values[0]) * 2) # nikon lies about half-steps (e.g. 6->6->5 instead of 6->5.5->5); we need to address this later on, so rescaling now
gm_skew = gm_skew or (int(values[1]) != 0)
elif tag == "White Balance Fine Tune" and maker == "FUJIFILM" and int(values[3]) != 0: # fuji
eprint("Warning: Fuji does not seem to produce any sensible data for finetuning! If all finetuned values are identical, use one with no finetuning (0)")
finetune = int(values[3]) / 20 # Fuji has -180..180 but steps are every 20
gm_skew = gm_skew or (int(values[1].replace(',','')) != 0)
elif tag == "White Balance Fine Tune" and maker == "SONY" and preset == "CoolWhiteFluorescent":
# Sony's Fluorescent Fun
if values[0] == "-1":
preset = "WarmWhiteFluorescent"
elif values[0] == "0":
preset = "CoolWhiteFluorescent"
elif values[0] == "1":
preset = "DayWhiteFluorescent"
elif values[0] == "2":
preset = "DaylightFluorescent"
else:
eprint("Warning: Unknown Sony Fluorescent WB Preset!")
elif tag == "White Balance Bracket": # olympus
finetune = values[0]
gm_skew = gm_skew or (int(values[1]) != 0)
elif tag == "Color Compensation Filter": # minolta?
gm_skew = gm_skew or (int(values[0]) != 0)
if rlevel > 0 and glevel > 0 and blevel > 0:
red = rlevel/glevel
blue = blevel/glevel
green = 1
if gm_skew:
eprint('WARNING: {0} has finetuning over GM axis! Data is skewed!'.format(filename))
# Adjust the maker/model we found with the map we generated before
if (maker, model) in exif_name_map:
enm = exif_name_map[maker, model]
maker = enm[0]
model = enm[1]
else:
eprint("WARNING: Couldn't find model in cameras.xml ('{0}', '{1}')".format(maker, model))
for preset_arr in listed_presets:
# ugly hack: Canon's Fluorescent is listed as WhiteFluorescent in the user manual
preset_arrv = list(preset_arr)
if maker and maker == "Canon" and preset_arrv[0] == "Fluorescent":
preset_arrv[0] = "WhiteFluorescent"
if preset_arrv[0] in FL_PRESET_REPLACE:
preset_arrv[0] = FL_PRESET_REPLACE[preset_arrv[0]]
if preset_arrv[0] not in IGNORED_PRESETS:
found_presets.append(tuple([maker,model,preset_arrv[0], 0, preset_arrv[1], preset_arrv[2], preset_arrv[3]]))
# Print out the WB value that was used in the file
if not preset:
preset = filename
if red and green and blue and preset not in IGNORED_PRESETS:
found_presets.append(tuple([maker, model, preset, int(finetune), red, green, blue]))
# get rid of duplicate presets
found_presets = list(set(found_presets))
def preset_to_sort(preset):
sort_for_preset = 0
if preset[2] in IGNORED_PRESETS:
sort_for_preset = 0
elif preset[2] in PRESET_SORT_MAPPING:
sort_for_preset = PRESET_SORT_MAPPING[preset[2]]
elif preset[2].endswith('K'):
sort_for_preset = int(preset[2][:-1])
else:
eprint("WARNING: no defined sort order for '{0}'".format(preset[2]))
return tuple([preset[0], preset[1], sort_for_preset, preset[3], preset[4], preset[5], preset[6]])
found_presets.sort(key=preset_to_sort)
min_padding = 0
for preset in found_presets:
if len(preset[2]) > min_padding:
min_padding = len(preset[2])
#dealing with Nikon half-steps
for index in range(len(found_presets)-1):
if (found_presets[index][0] == 'Nikon' and #case now translated
found_presets[index+1][0] == found_presets[index][0] and
found_presets[index+1][1] == found_presets[index][1] and
found_presets[index+1][2] == found_presets[index][2] and
found_presets[index+1][3] == found_presets[index][3]) :
curr_finetune = int(found_presets[index][3])
if curr_finetune < 0:
found_presets[index+1] = list(found_presets[index+1])
found_presets[index+1][3] = (int(found_presets[index+1][3]) + 1)
found_presets[index+1] = tuple(found_presets[index+1])
elif curr_finetune > 0:
found_presets[index] = list(found_presets[index])
found_presets[index][3] = (curr_finetune) - 1
found_presets[index] = tuple(found_presets[index])
# check for gaps in finetuning for half-steps (seems that nikon and sony can have half-steps)
for index in range(len(found_presets)-1):
if ( (found_presets[index][0] == "Nikon" or found_presets[index][0] == "Sony") and #case now translated
found_presets[index+1][0] == found_presets[index][0] and ##
found_presets[index+1][1] == found_presets[index][1] and
found_presets[index+1][2] == found_presets[index][2]) :
found_presets[index] = list(found_presets[index])
found_presets[index+1] = list(found_presets[index+1])
if (found_presets[index+1][3] % 2 == 0 and
found_presets[index][3] % 2 == 0 and
found_presets[index+1][3] == found_presets[index][3] + 2):
#detected gap eg -12 -> -10. slicing in half to undo multiplication done earlier
found_presets[index][3] = int(found_presets[index][3] / 2)
found_presets[index+1][3] = int(found_presets[index+1][3] / 2)
elif (found_presets[index+1][3] % 2 == 0 and
found_presets[index][3] % 2 == 1 and
found_presets[index+1][3] == (found_presets[index][3] + 1)*2 and
(index + 2 == len(found_presets) or
found_presets[index+2][2] != found_presets[index+1][2] ) ):
#dealing with corner case of the last half-step not being dealt with earlier
found_presets[index+1][3] = int(found_presets[index+1][3] / 2)
found_presets[index] = tuple(found_presets[index])
found_presets[index+1] = tuple(found_presets[index+1])
#detect lazy finetuning (will not complain if there's no finetuning)
lazy_finetuning = []
for index in range(len(found_presets)-1):
if (found_presets[index+1][0] == found_presets[index][0] and ##
found_presets[index+1][1] == found_presets[index][1] and
found_presets[index+1][2] == found_presets[index][2] and
found_presets[index+1][3] != ((found_presets[index][3])+1) ):
# found gap. complain about needing to interpolate
lazy_finetuning.append(tuple([found_presets[index][0], found_presets[index][1], found_presets[index][2]]))
# Get rid of duplicate lazy finetuning reports
lazy_finetuning = list(set(lazy_finetuning))
# $stderr.puts lazy_finetuning.inspect.gsub("], ", "],\n") # debug content
for lazy in lazy_finetuning:
eprint("Gaps detected in finetuning for {0} {1} preset {2}, dt will need to interpolate!".format(lazy[0], lazy[1], lazy[2]))
for preset in found_presets:
if preset[2] in IGNORED_PRESETS:
eprint("Ignoring preset '{0}'".format(preset[2]))
else:
preset_name = ''
if preset[2].endswith('K'):
preset_name = '"'+preset[2]+'"'
else:
preset_name = preset[2]
print(' {{ "{0}", "{1}", {2:<{min_pad}}, {3}, {{ {4}, {5}, {6}, 0 }} }},'.format(preset[0], preset[1], preset_name, preset[3], preset[4], preset[5], preset[6], min_pad=min_padding))
|
peterbud/darktable
|
tools/extract_wb.py
|
Python
|
gpl-3.0
| 16,361
|
[
"Amber"
] |
b19c2efcb98cd1196cac46a89f0db3a403ba937f92d941a58b8c2cc3b41baeb7
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2014 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
cocos.director.director is the singleton that creates and handles the main ``Window``
and manages the logic behind the ``Scenes``.
Initializing
------------
The first thing to do is to initialize the ``director``::
from cocos.director import director
director.init( parameters )
This will initialize the director, and will create a display area
(a 640x480 window by default).
The parameters that are supported by director.init() are the same
parameters that are supported by pyglet.window.Window(), plus a few
cocos exclusive ones. They are all named parameters (kwargs).
See ``Director.init()`` for details.
Example::
director.init( width=800, height=600, caption="Hello World", fullscreen=True )
Running a Scene
----------------
Once you have initialized the director, you can run your first ``Scene``::
director.run( Scene( MyLayer() ) )
This will run a scene that has only 1 layer: ``MyLayer()``. You can run a scene
that has multiple layers. For more information about ``Layers`` and ``Scenes``
refer to the ``Layers`` and ``Scene`` documentation.
Once a scene is running you can do the following actions:
* ``director.replace( new_scene ):``
Replaces the running scene with the new_scene
You could also use a transition. For example:
director.replace( SplitRowsTransition( new_scene, duration=2 ) )
* ``director.push( new_scene ):``
The running scene will be pushed onto a stack of suspended scenes,
and new_scene will be executed.
* ``director.pop():``
Will pop a scene from the stack, and it will replace the running scene.
* ``director.scene.end( end_value ):``
Finishes the current scene with an end value of ``end_value``. The next scene
to be run will be popped from the stack.
Other functions you can use are:
* ``director.get_window_size():``
Returns an (x,y) pair with the _logical_ dimensions of the display.
The display might have been resized, but coordinates are always relative
to this size. If you need the _physical_ dimensions, check the dimensions
of ``director.window``
* ``get_virtual_coordinates(self, x, y):``
Transforms coordinates that belong to the real (physical) window size to
the coordinates that belong to the virtual (logical) window. Returns
an x,y pair in logical coordinates.
The director also has some useful attributes:
* ``director.return_value``: The value returned by the last scene that
called ``director.scene.end``. This is useful to use scenes somewhat like
function calls: you push a scene to call it, and check the return value
when the director returns control to you.
* ``director.window``: This is the pyglet window handled by this director,
if you happen to need low level access to it.
* ``self.show_FPS``: You can set this to a boolean value to enable or disable
the framerate indicator.
* ``self.scene``: The scene currently active
'''
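# A minimal usage sketch (illustrative only; assumes a cocos Layer subclass
# named MyLayer is defined elsewhere):
#     from cocos.director import director
#     from cocos.scene import Scene
#     director.init(width=800, height=600, caption="Hello World")
#     director.run(Scene(MyLayer()))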
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
import sys
from os import getenv
import pyglet
from pyglet import window, event
from pyglet import clock
#from pyglet import media
from pyglet.gl import *
import cocos, cocos.audio, cocos.custom_clocks
if hasattr(sys, 'is_cocos_sphinx') and sys.is_cocos_sphinx:
__all__ = ['director', 'Director', 'DefaultHandler']
else:
__all__ = ['director', 'DefaultHandler']
class DefaultHandler( object ):
def __init__(self):
super(DefaultHandler,self).__init__()
self.wired = False
def on_key_press( self, symbol, modifiers ):
if symbol == pyglet.window.key.F and (modifiers & pyglet.window.key.MOD_ACCEL):
director.window.set_fullscreen( not director.window.fullscreen )
return True
elif symbol == pyglet.window.key.P and (modifiers & pyglet.window.key.MOD_ACCEL):
import cocos.scenes.pause as pause
pause_sc = pause.get_pause_scene()
if pause_sc:
director.push( pause_sc )
return True
elif symbol == pyglet.window.key.W and (modifiers & pyglet.window.key.MOD_ACCEL):
# import wired
if not self.wired:
glDisable(GL_TEXTURE_2D)
glPolygonMode(GL_FRONT, GL_LINE)
glPolygonMode(GL_BACK, GL_LINE)
# wired.wired.install()
# wired.wired.uset4F('color', 1.0, 1.0, 1.0, 1.0 )
self.wired = True
else:
glEnable(GL_TEXTURE_2D)
glPolygonMode(GL_FRONT, GL_FILL)
glPolygonMode(GL_BACK, GL_FILL)
self.wired = False
# wired.wired.uninstall()
return True
elif symbol == pyglet.window.key.X and (modifiers & pyglet.window.key.MOD_ACCEL):
director.show_FPS = not director.show_FPS
return True
elif symbol == pyglet.window.key.I and (modifiers & pyglet.window.key.MOD_ACCEL):
from cocos.layer import PythonInterpreterLayer
if not director.show_interpreter:
if director.python_interpreter is None:
director.python_interpreter = cocos.scene.Scene( PythonInterpreterLayer() )
director.python_interpreter.enable_handlers( True )
director.python_interpreter.on_enter()
director.show_interpreter = True
else:
director.python_interpreter.on_exit()
director.show_interpreter = False
return True
elif symbol == pyglet.window.key.S and (modifiers & pyglet.window.key.MOD_ACCEL):
import time
pyglet.image.get_buffer_manager().get_color_buffer().save('screenshot-%d.png' % (int( time.time() ) ) )
return True
if symbol == pyglet.window.key.ESCAPE:
director.pop()
return True
class Director(event.EventDispatcher):
"""Class that creates and handle the main Window and manages how
and when to execute the Scenes
You should not directly instantiate the class, instead you do::
from cocos.director import director
to access the only one Director instance.
"""
#: a dict with locals for the interactive python interpreter (fill with what you need)
interpreter_locals = {}
def init(self, *args, **kwargs):
"""
Initializes the Director creating the main window.
There are a few cocos exclusive parameters, the rest are the
standard pyglet parameters for pyglet.window.Window.__init__
This docstring only partially lists the pyglet parameters; a full
list is available in the pyglet Window API Reference at
http://pyglet.org/doc/api/pyglet.window.Window-class.html
:Parameters:
`do_not_scale` : bool
False: on window resizes, cocos will scale the view so that your
app doesn't need to handle resizes.
True: your app must include logic to deal with different window
sizes over the session.
Defaults to False
`audio_backend` : string
one in ['pyglet','sdl']. Defaults to 'pyglet' for legacy support.
`audio` : dict or None
None or a dict providing parameters for the sdl audio backend.
None: in this case a "null" audio system will be used, where all the
sdl sound operations will be no-ops. This may be useful if you do not
want to depend on SDL_mixer
A dictionary with string keys; these are the arguments for setting up
the audio output (sample rate and bit-width, channels, buffer size).
The key names/values should match the positional arguments of
http://www.pygame.org/docs/ref/mixer.html#pygame.mixer.init
The default value is {}, which means sound enabled with default
settings
`fullscreen` : bool
Window is created in fullscreen. Default is False
`resizable` : bool
Window is resizable. Default is False
`vsync` : bool
Sync with the vertical retrace. Default is True
`width` : int
Window width size. Default is 640
`height` : int
Window height size. Default is 480
`caption` : string
Window title.
`visible` : bool
Window is visible or not. Default is True.
:rtype: pyglet.window.Window
:returns: The main window, an instance of pyglet.window.Window class.
"""
#: whether or not the FPS are displayed
self.show_FPS = False
#: stack of scenes
self.scene_stack = []
#: scene that is being run
self.scene = None
#: this is the next scene that will be shown
self.next_scene = None
# python interpreter
self.python_interpreter = None
#: whether or not to show the python interpreter
self.show_interpreter = False
#: flag requesting app termination
self.terminate_app = False
# pop out the Cocos-specific flags
self.do_not_scale_window = kwargs.pop('do_not_scale', False)
audio_backend = kwargs.pop('audio_backend', 'pyglet')
audio_settings = kwargs.pop('audio', {})
# handle pyglet 1.1.x vs 1.2dev differences in fullscreen
self._window_virtual_width = kwargs.get('width', None)
self._window_virtual_height = kwargs.get('height', None)
if pyglet.version.startswith('1.1') and kwargs.get('fullscreen', False):
# pyglet 1.1.x doesn't allow fullscreen with explicit width or height
kwargs.pop('width', 0)
kwargs.pop('height', 0)
#: pyglet's window object
self.window = window.Window( *args, **kwargs )
# complete the viewport geometry info, both virtual and real,
# also set the appropriate on_resize handler
if self._window_virtual_width is None:
self._window_virtual_width = self.window.width
if self._window_virtual_height is None:
self._window_virtual_height = self.window.height
self._window_virtual_aspect = (
self._window_virtual_width / float( self._window_virtual_height ))
self._offset_x = 0
self._offset_y = 0
if self.do_not_scale_window:
resize_handler = self.unscaled_resize_window
self.set_projection = self.set_projection2D
else:
resize_handler = self.scaled_resize_window
self.set_projection = self.set_projection3D
# the offsets and size for the viewport will be proper after this
self._resize_no_events = True
resize_handler(self.window.width, self.window.height)
self._resize_no_events = False
self.window.push_handlers(on_resize=resize_handler)
self.window.push_handlers(self.on_draw)
# opengl settings
self.set_alpha_blending()
# default handler
self.window.push_handlers( DefaultHandler() )
# Environment variable COCOS2D_NOSOUND=1 overrides audio settings
if getenv('COCOS2D_NOSOUND', None) == '1' or audio_backend == 'pyglet':
audio_settings = None
# if audio is not working, better to not work at all. Except if
# explicitly instructed to continue
if not cocos.audio._working and audio_settings is not None:
from cocos.audio.exceptions import NoAudioError
msg = "cocos.audio isn't able to work without needed dependencies. " \
"Try installing pygame for fixing it, or forcing no audio " \
"mode by calling director.init with audio=None, or setting the " \
"COCOS2D_NOSOUND=1 variable in your env."
raise NoAudioError(msg)
# Audio setup:
#TODO: reshape audio to not screw unittests
import os
if not os.environ.get('cocos_utest', False):
cocos.audio.initialize(audio_settings)
return self.window
fps_display = None
def set_show_FPS(self, value):
if value and self.fps_display is None:
self.fps_display = clock.ClockDisplay()
elif not value and self.fps_display is not None:
self.fps_display.unschedule()
self.fps_display = None
show_FPS = property(lambda self: self.fps_display is not None,
set_show_FPS)
def run(self, scene):
"""Runs a scene, entering in the Director's main loop.
:Parameters:
`scene` : `Scene`
The scene that will be run.
"""
self._set_scene( scene )
event_loop.run()
def set_recorder(self, framerate, template="frame-%d.png", duration=None):
'''Replaces the app clock so that we can ensure a steady
frame rate and save one image per frame
:Parameters:
`framerate`: int
the number of frames per second
`template`: str
the template that will be completed with an int to form the file names
`duration`: float
the number of seconds to record, or 0 for infinite
'''
clock = cocos.custom_clocks.get_recorder_clock(framerate, template, duration)
cocos.custom_clocks.set_app_clock(clock)
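# Illustrative call (values are arbitrary): record ten seconds at a steady
# 30 fps, one PNG per frame:
#     director.set_recorder(30, template="frame-%d.png", duration=10.0)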
def on_draw( self ):
"""Handles the event 'on_draw' from the pyglet.window.Window
Realizes switch to other scene and app termination if needed
Clears the window area
The windows is painted as:
- Render the current scene by calling it's visit method
- Eventualy draw the fps metter
- Eventually draw the interpreter
When the window is minimized any pending switch to scene will be
delayed to the next de-minimizing time.
"""
# typically True when window minimized
if ((self.window.width==0 or self.window.height==0) and
not self.terminate_app):
# if surface area is zero we don't need to draw; also
# we don't want to allow scene changes in this situation: usually
# on_enter does some scaling, which would lead to division by zero
return
# handle scene changes and app termination
if self.terminate_app:
self.next_scene = None
if self.next_scene is not None or self.terminate_app:
self._set_scene( self.next_scene )
if self.terminate_app:
pyglet.app.exit()
return
self.window.clear()
# draw all the objects
glPushMatrix()
self.scene.visit()
glPopMatrix()
# finally show the FPS
if self.show_FPS:
self.fps_display.draw()
if self.show_interpreter:
self.python_interpreter.visit()
def push(self, scene):
"""Suspends the execution of the running scene, pushing it
on the stack of suspended scenes. The new scene will be executed.
:Parameters:
`scene` : `Scene`
It is the scene that will be run.
"""
self.dispatch_event("on_push", scene )
def on_push( self, scene ):
self.next_scene = scene
self.scene_stack.append( self.scene )
def pop(self):
"""If the scene stack is empty the appication is terminated.
Else pops out a scene from the stack and sets as the running one.
"""
self.dispatch_event("on_pop")
def on_pop(self):
if len(self.scene_stack)==0:
self.terminate_app = True
else:
self.next_scene = self.scene_stack.pop()
def replace(self, scene):
"""Replaces the running scene with a new one. The running scene is terminated.
:Parameters:
`scene` : `Scene`
It is the scene that will be run.
"""
self.next_scene = scene
def _set_scene(self, scene ):
"""Makes scene the current scene
Operates on behalf of the public scene switching methods
User code must not call directly
"""
# Even library code should not call this directly: instead set
# .next_scene and let 'on_draw' call here at the proper time
self.next_scene = None
# always true except for first scene in the app
if self.scene is not None:
self.scene.on_exit()
self.scene.enable_handlers( False )
old = self.scene
self.scene = scene
# always true except when terminating the app
if self.scene is not None:
self.scene.enable_handlers( True )
scene.on_enter()
return old
#
# Window Helper Functions
#
def get_window_size( self ):
"""Returns the size of the window when it was created, and not the
actual size of the window.
Usually you don't want to know the current window size, because the
Director() hides the complexity of rescaling your objects when
the Window is resized or if the window is made fullscreen.
If you created a window of 640x480, then you should continue to place
your objects in a 640x480 world, no matter if your window is resized or not.
Director will do the magic for you.
:rtype: (x,y)
:returns: The size of the window when it was created
"""
return ( self._window_virtual_width, self._window_virtual_height)
def get_virtual_coordinates( self, x, y ):
"""Transforms coordinates that belongs the *real* window size, to the
coordinates that belongs to the *virtual* window.
For example, if you created a window of 640x480, and it was resized
to 640x1000, then if you move your mouse over that window,
it will return the coordinates that belongs to the newly resized window.
Probably you are not interested in those coordinates, but in the coordinates
that belongs to your *virtual* window.
:rtype: (x,y)
:returns: Transformed coordinates from the *real* window to the *virtual* window
"""
x_diff = self._window_virtual_width / float( self.window.width - self._offset_x * 2 )
y_diff = self._window_virtual_height / float( self.window.height - self._offset_y * 2 )
adjust_x = (self.window.width * x_diff - self._window_virtual_width ) / 2
adjust_y = (self.window.height * y_diff - self._window_virtual_height ) / 2
return ( int( x_diff * x) - adjust_x, int( y_diff * y ) - adjust_y )
def scaled_resize_window( self, width, height):
"""One of two possible methods that are called when the main window is resized.
This implementation scales the display such that the initial resolution
requested by the programmer (the "logical" resolution) is always retained
and the content scaled to fit the physical display.
This implementation also sets up a 3D projection for compatibility with the
largest set of Cocos transforms.
The other implementation is `unscaled_resize_window`.
:Parameters:
`width` : Integer
New width
`height` : Integer
New height
"""
# physical view size
pw, ph = width, height
# virtual (desired) view size
vw, vh = self.get_window_size()
# desired aspect ratio
v_ar = vw/float(vh)
# usable width, height
uw = int(min(pw, ph*v_ar))
uh = int(min(ph, pw/v_ar))
ox = (pw-uw)//2
oy = (ph-uh)//2
self._offset_x = ox
self._offset_y = oy
self._usable_width = uw
self._usable_height = uh
self.set_projection()
if self._resize_no_events:
# setting viewport geometry, not handling an event
return
# deprecated - see issue 154
self.dispatch_event("on_resize", width, height)
self.dispatch_event("on_cocos_resize", self._usable_width, self._usable_height)
# dismiss the pyglet BaseWindow default 'on_resize' handler
return pyglet.event.EVENT_HANDLED
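# Worked example of the letterboxing math above: a virtual size of 640x480
# (v_ar = 4/3) in a physical window of 800x480 gives uw = min(800, 480*4/3)
# = 640 and uh = min(480, 800*3/4) = 480, so the usable area is 640x480
# with offsets ox = (800-640)//2 = 80 and oy = 0.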
def unscaled_resize_window(self, width, height):
"""One of two possible methods that are called when the main window is resized.
This implementation does not scale the display but rather forces the logical
resolution to match the physical one.
This implementation sets up a 2D projection, resulting in the best pixel
alignment possible. This is good for text and other detailed 2d graphics
rendering.
The other implementation is `scaled_resize_window`.
:Parameters:
`width` : Integer
New width
`height` : Integer
New height
"""
self._usable_width = width
self._usable_height = height
if self._resize_no_events:
# setting viewport geometry, not handling an event
return
# deprecated - see issue 154
self.dispatch_event("on_resize", width, height)
self.dispatch_event("on_cocos_resize", self._usable_width, self._usable_height)
def set_projection(self):
"""
placeholder, will be set to one of set_projection2D or set_projection3D
when director.init is called
"""
pass
def set_projection3D(self):
'''Sets a 3D projection maintaining the aspect ratio of the original window size'''
# virtual (desired) view size
vw, vh = self.get_window_size()
glViewport(self._offset_x, self._offset_y, self._usable_width, self._usable_height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60, self._usable_width/float(self._usable_height), 0.1, 3000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt( vw/2.0, vh/2.0, vh/1.1566, # eye
vw/2.0, vh/2.0, 0, # center
0.0, 1.0, 0.0 # up vector
)
def set_projection2D(self):
"""Sets a 2D projection (ortho) covering all the window"""
# called only for the side effect of setting matrices in pyglet
self.window.on_resize(self._usable_width, self._usable_height)
#
# Misc functions
#
def set_alpha_blending( self, on=True ):
"""
Enables/Disables alpha blending in OpenGL
using the GL_ONE_MINUS_SRC_ALPHA algorithm.
On by default.
"""
if on:
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
else:
glDisable(GL_BLEND)
def set_depth_test(self, on=True):
'''Enables z test. On by default
'''
if on:
glClearDepth(1.0)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LEQUAL)
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
else:
glDisable( GL_DEPTH_TEST )
event_loop = pyglet.app.event_loop
if not hasattr(event_loop, "event"):
event_loop = pyglet.app.EventLoop()
director = Director()
director.event = event_loop.event
"""The singleton; check `cocos.director.Director` for details on usage.
Don't instantiate Director(). Just use this singleton."""
director.interpreter_locals["director"] = director
director.interpreter_locals["cocos"] = cocos
Director.register_event_type('on_push')
Director.register_event_type('on_pop')
Director.register_event_type('on_resize')
Director.register_event_type('on_cocos_resize')
|
Alwnikrotikz/los-cocos
|
cocos/director.py
|
Python
|
bsd-3-clause
| 25,909
|
[
"VisIt"
] |
58623547da7b3b65d47f2d8febdfacf37ee8a802a79102957a3cd9033a4ed845
|
ALLOWED = {"a",
"abandoned",
"able",
"aborted",
"abortion",
"about",
"above",
"absolute",
"absolutely",
"abuse",
"accept",
"acceptable",
"according",
"accountants",
"accounting",
"across",
"act",
"action",
"actually",
"ad",
"add",
"address",
"administration",
"ads",
"advantage",
"advertise",
"afford",
"afraid",
"after",
"again",
"against",
"age",
"ago",
"agree",
"agreed",
"agreement",
"agreements",
"air",
"airport",
"airports",
"all",
"allegiance",
"allow",
"allowed",
"almost",
"along",
"also",
"although",
"always",
"am",
"amazing",
"amazingly",
"amendment",
"america",
"american",
"americans",
"americas",
"among",
"amount",
"an",
"and",
"angeles",
"anger",
"animal",
"announce",
"announced",
"announcement",
"announces",
"announcing",
"another",
"answer",
"anti",
"any",
"anybody",
"anymore",
"anyone",
"anything",
"anywhere",
"apart",
"apartment",
"apartments",
"apologized",
"appear",
"applause",
"apple",
"appreciate",
"appropriate",
"arabia",
"are",
"area",
"aren't",
"arm",
"army",
"around",
"arrange",
"arrive",
"arsenal",
"art",
"artificial",
"artificially",
"as",
"ask",
"asked",
"asking",
"asphalt",
"assets",
"assimilate",
"assimilation",
"assume",
"assuming",
"assurance",
"at",
"athlete",
"atlantic",
"atom",
"attacked",
"audience",
"authorities",
"automobile",
"avenue",
"away",
"ba",
"babies",
"baby",
"bac",
"back",
"bad",
"badly",
"ball",
"band",
"bank",
"bankrupt",
"banks",
"bar",
"barack",
"barron",
"base",
"based",
"basic",
"bat",
"battlefield",
"be",
"bear",
"beat",
"beating",
"beautiful",
"beauty",
"became",
"because",
"become",
"becoming",
"bed",
"beef",
"been",
"before",
"beg",
"began",
"begin",
"behave",
"behind",
"being",
"believe",
"believers",
"bell",
"below",
"ben",
"bergdahl",
"bernie",
"beside",
"best",
"better",
"between",
"beyond",
"bicycle",
"bid",
"bidder",
"biden",
"big",
"bigger",
"biggest",
"bill",
"billion",
"bird",
"bit",
"black",
"bloated",
"block",
"blocks",
"blood",
"blow",
"blown",
"blue",
"board",
"boat",
"body",
"boeing",
"bone",
"book",
"border",
"borders",
"born",
"borrowings",
"both",
"bother",
"bottom",
"bought",
"box",
"boy",
"brady",
"brag",
"brain",
"branch",
"brand",
"bread",
"break",
"breaks",
"bridge",
"bridges",
"bright",
"brilliantly",
"bring",
"bringing",
"broad",
"broadcast",
"broke",
"broken",
"brooklyn",
"brother",
"brought",
"brown",
"bubble",
"bubblewe",
"budget",
"buffaloed",
"build",
"builder",
"building",
"buildings",
"builds",
"built",
"bullet",
"bunch",
"burn",
"bush",
"business",
"businesses",
"businessman",
"busy",
"but",
"buy",
"buzzer",
"by",
"caesars",
"california",
"call",
"called",
"calls",
"came",
"camp",
"campaign",
"campaigns",
"can",
"can't",
"canada",
"candidate",
"candidates",
"cannot",
"capable",
"capita",
"capital",
"captain",
"car",
"card",
"cards",
"care",
"careful",
"carly",
"carolina",
"carry",
"cars",
"carson",
"cartel",
"case",
"cat",
"catastrophe",
"catch",
"caterpillar",
"caught",
"cause",
"caused",
"cell",
"cent",
"center",
"century",
"certain",
"certainly",
"certify",
"cetera",
"chair",
"chairman",
"challenged",
"chance",
"change",
"changed",
"changing",
"chapter",
"character",
"charge",
"charged",
"charging",
"charities",
"chart",
"check",
"cheering",
"cheerleader",
"cheerleading",
"cherish",
"chevrolet",
"chick",
"chief",
"child",
"children",
"china",
"choice",
"choose",
"chord",
"chris",
"christians",
"christie",
"chunk",
"circle",
"circuit",
"circumstances",
"citizen",
"city",
"claim",
"class",
"clause",
"clean",
"clear",
"clearance",
"climb",
"clinton",
"clock",
"close",
"closing",
"clothe",
"cloud",
"clue",
"cnn",
"coast",
"coat",
"cold",
"collect",
"college",
"colony",
"color",
"column",
"come",
"comes",
"coming",
"comment",
"commissioner",
"common",
"companies",
"company",
"compare",
"compared",
"compete",
"competence",
"competent",
"competition",
"complete",
"complex",
"concept",
"concerned",
"condition",
"conditioner",
"congratulations",
"congress",
"connect",
"conservative",
"consider",
"considering",
"consonant",
"contain",
"continent",
"continue",
"continuously",
"contributed",
"contribution",
"contributor",
"control",
"controlled",
"convention",
"converting",
"convincing",
"cook",
"cool",
"copy",
"core",
"corey",
"corn",
"corner",
"corps",
"correct",
"correctness",
"corrupt",
"cost",
"costs",
"cotton",
"could",
"couldn't",
"count",
"counter",
"countries",
"country",
"couple",
"course",
"courses",
"cover",
"covering",
"cow",
"crap",
"crashing",
"crass",
"cratered",
"crease",
"create",
"created",
"creating",
"credibility",
"credit",
"crime",
"criminal",
"crisis",
"crop",
"cross",
"crosstalk",
"crowd",
"crowds",
"cruz",
"cry",
"cunning",
"currency",
"current",
"cut",
"cuts",
"cutting",
"dad",
"dance",
"danger",
"dark",
"day",
"days",
"de",
"dea",
"dead",
"deal",
"dealing",
"deals",
"dealt",
"dear",
"death",
"debate",
"debt",
"decades",
"decide",
"decided",
"decimal",
"deductibles",
"deep",
"deficits",
"degree",
"democrat",
"democrats",
"denied",
"depend",
"depends",
"describe",
"desert",
"deserves",
"design",
"destabilize",
"destabilized",
"destroyed",
"destroying",
"destructive",
"details",
"determine",
"devaluation",
"devalue",
"devalued",
"devaluing",
"develop",
"dictionary",
"did",
"didn't",
"die",
"differ",
"difference",
"different",
"differently",
"difficult",
"direct",
"directly",
"disassociate",
"disaster",
"disastrous",
"discuss",
"discussing",
"disgrace",
"disgusting",
"dishonest",
"dislike",
"distant",
"divide",
"division",
"do",
"doctor",
"doctors",
"does",
"doesn't",
"dog",
"dogs",
"doing",
"dollar",
"dollars",
"domestic",
"don",
"don't",
"donald",
"done",
"donnie",
"donors",
"door",
"doral",
"double",
"down",
"drain",
"draw",
"dream",
"dress",
"drink",
"drive",
"drop",
"drugs",
"dry",
"duck",
"dump",
"dumping",
"during",
"dying",
"e",
"each",
"ear",
"early",
"earn",
"earth",
"ease",
"easily",
"east",
"eat",
"eating",
"economically",
"economists",
"economy",
"edge",
"education",
"effect",
"egg",
"eight",
"either",
"elected",
"election",
"electric",
"element",
"elevators",
"elite",
"else",
"else's",
"embarrass",
"employ",
"employed",
"employees",
"end",
"ended",
"endorsing",
"enemies",
"enemy",
"energy",
"engine",
"engineers",
"england",
"english",
"enormous",
"enough",
"enter",
"enterprise",
"environmental",
"environmentalist",
"equal",
"equate",
"equipment",
"eric",
"escaped",
"especially",
"esprit",
"establishment",
"estate",
"et",
"europe",
"evanka",
"even",
"evening",
"event",
"eventually",
"ever",
"every",
"everybody",
"everyplace",
"everything",
"everywhere",
"evolved",
"exact",
"example",
"except",
"exception",
"excite",
"exclusively",
"excuse",
"executive",
"exercise",
"exist",
"exonerated",
"expect",
"expectations",
"expenses",
"expensive",
"experience",
"experiment",
"explain",
"explode",
"extensions",
"extremely",
"eye",
"face",
"fact",
"factor",
"factory",
"fair",
"fairly",
"fairness",
"fall",
"falling",
"falls",
"false",
"family",
"famous",
"fantastic",
"far",
"farm",
"fashioned",
"fast",
"fat",
"father",
"favor",
"fear",
"fed",
"feed",
"feel",
"feet",
"fell",
"fellas",
"fellow",
"felt",
"few",
"field",
"fig",
"fight",
"fighting",
"figure",
"figures",
"filed",
"filing",
"fill",
"final",
"finalized",
"finally",
"finance",
"financial",
"find",
"fine",
"finest",
"finger",
"finish",
"fiorina",
"fire",
"firm",
"first",
"fish",
"fit",
"five",
"fix",
"flat",
"flexibility",
"flexible",
"floor",
"flow",
"flower",
"fly",
"folks",
"follow",
"food",
"fool",
"foolish",
"foot",
"football",
"for",
"force",
"ford",
"foreign",
"forest",
"forget",
"form",
"forming",
"fortune",
"forward",
"found",
"foundation",
"four",
"fracking",
"fraction",
"francisco",
"frankly",
"fraud",
"free",
"fresh",
"friend",
"friends",
"from",
"front",
"fruit",
"full",
"fully",
"fun",
"funded",
"funder",
"funding",
"game",
"garden",
"gas",
"gate",
"gather",
"gave",
"gdp",
"gen",
"general",
"generally",
"gentle",
"gentleman",
"gentlemen",
"george",
"get",
"gets",
"getting",
"gilmore",
"girl",
"give",
"given",
"giver",
"glad",
"glass",
"go",
"god",
"goes",
"going",
"gold",
"golf",
"gone",
"gonna",
"good",
"gorgeous",
"got",
"gotta",
"gotten",
"gov",
"govern",
"government",
"governmental",
"governor",
"graduate",
"graham",
"grand",
"grass",
"gray",
"great",
"greatest",
"greatly",
"greece",
"green",
"grew",
"gridlock",
"gross",
"ground",
"group",
"groups",
"grow",
"guarantee",
"guardia",
"guards",
"guess",
"guide",
"gun",
"guns",
"guy",
"guys",
"hack",
"had",
"hair",
"haired",
"half",
"hampshire",
"hand",
"happen",
"happened",
"happening",
"happens",
"happy",
"hard",
"harvard",
"has",
"hat",
"hate",
"hating",
"have",
"haven't",
"having",
"he",
"he'd",
"head",
"headquarters",
"heads",
"health",
"hear",
"heard",
"heart",
"heat",
"heavy",
"held",
"hell",
"hello",
"help",
"her",
"here",
"hero",
"hey",
"high",
"highest",
"highly",
"hill",
"hillary",
"him",
"himself",
"hire",
"his",
"hispanic",
"hispanics",
"history",
"hit",
"hold",
"hole",
"home",
"honest",
"honestly",
"honor",
"honorable",
"honored",
"hope",
"hopefully",
"horrible",
"horror",
"horse",
"hot",
"hotel",
"hotels",
"hour",
"house",
"how",
"however",
"huckabee",
"huge",
"huh",
"human",
"humanitarian",
"humvees",
"hundred",
"hundreds",
"hunt",
"hurry",
"husband",
"hyatt",
"i",
"i'd",
"i'll",
"ice",
"idea",
"idiots",
"if",
"illegal",
"illegally",
"illegals",
"imagine",
"imbalance",
"immediately",
"immigrants",
"immigration",
"impact",
"important",
"importantly",
"impossible",
"in",
"inappropriate",
"inaudible",
"incentive",
"inch",
"inclined",
"include",
"including",
"incorrectly",
"increase",
"incredible",
"incredibly",
"independent",
"indicate",
"industry",
"inexpensively",
"infrastructure",
"insect",
"instances",
"instant",
"instead",
"instinct",
"instrument",
"insurance",
"intelligent",
"intent",
"intention",
"interest",
"interested",
"interesting",
"interestingly",
"interests",
"into",
"invent",
"invite",
"iowa",
"iran",
"iraq",
"iron",
"is",
"isis",
"islamic",
"island",
"isn't",
"israel",
"issues",
"it",
"its",
"itself",
"jailed",
"japan",
"jared",
"jeb",
"jersey",
"jets",
"jim",
"job",
"jobs",
"joe",
"john",
"join",
"journal",
"joy",
"judge",
"jump",
"junk",
"just",
"kai",
"kanye",
"kasich",
"kate",
"keep",
"kegs",
"kept",
"kerry",
"key",
"kicks",
"kidding",
"kill",
"killed",
"killer",
"killers",
"killing",
"killingsmurders",
"kind",
"king",
"knew",
"knocking",
"knocks",
"know",
"knowing",
"knows",
"komatsu",
"l",
"la",
"labor",
"ladies",
"lady",
"lake",
"land",
"language",
"large",
"largest",
"last",
"late",
"later",
"latin",
"laugh",
"laughable",
"laughing",
"laughter",
"laura",
"law",
"laws",
"lax",
"lay",
"lead",
"leader",
"leaders",
"leadership",
"leading",
"league",
"leagues",
"learn",
"learned",
"least",
"leave",
"led",
"left",
"leg",
"legal",
"legally",
"lenders",
"length",
"less",
"let",
"letter",
"level",
"levels",
"leverage",
"liabilities",
"lie",
"life",
"lifestyle",
"lifetime",
"lift",
"light",
"like",
"liked",
"liking",
"lindsey",
"line",
"lines",
"liquid",
"list",
"listen",
"listening",
"literally",
"little",
"live",
"lives",
"living",
"loaded",
"loan",
"lobbyist",
"lobbyists",
"local",
"locate",
"located",
"log",
"lone",
"long",
"longer",
"look",
"looked",
"looking",
"los",
"lose",
"losers",
"losing",
"lost",
"lot",
"lots",
"loud",
"love",
"loves",
"low",
"macarthur",
"machine",
"made",
"magic",
"magnet",
"mail",
"main",
"major",
"make",
"maker",
"makes",
"making",
"man",
"manhattan",
"manner",
"manufactured",
"manufacturer",
"manufacturing",
"many",
"map",
"marco",
"margins",
"mark",
"market",
"martin",
"mass",
"massive",
"master",
"match",
"material",
"matter",
"may",
"maybe",
"me",
"mean",
"means",
"meant",
"measure",
"meat",
"media",
"medicaid",
"medical",
"medicare",
"medieval",
"meet",
"meeting",
"megyn",
"melania",
"melody",
"men",
"mess",
"met",
"metal",
"method",
"mexican",
"mexicans",
"mexico",
"miami",
"michigan",
"microphone",
"middle",
"might",
"mike",
"mile",
"militarily",
"military",
"milk",
"million",
"millions",
"mind",
"mindset",
"mine",
"minor",
"minute",
"minutes",
"miss",
"misspeak",
"misspoke",
"mix",
"modern",
"modestly",
"molecule",
"moment",
"money",
"monmouth",
"month",
"months",
"moon",
"morally",
"more",
"morning",
"most",
"mother",
"motion",
"mount",
"mountain",
"mouth",
"move",
"mr",
"much",
"multiply",
"murderers",
"music",
"must",
"my",
"myself",
"name",
"named",
"names",
"nation",
"national",
"natural",
"nature",
"near",
"necessary",
"neck",
"need",
"needs",
"negative",
"negatives",
"negotiate",
"negotiates",
"negotiating",
"negotiation",
"negotiator",
"negotiators",
"neighbor",
"net",
"networks",
"nevada",
"never",
"nevertheless",
"new",
"news",
"newspapers",
"next",
"nice",
"night",
"nine",
"no",
"nobody",
"noise",
"nomination",
"nominee",
"nonsense",
"noon",
"nor",
"north",
"nose",
"not",
"note",
"nothing",
"notice",
"notified",
"noun",
"now",
"nuclear",
"number",
"numbers",
"numeral",
"nurses",
"nutshell",
"o'donnell",
"o'malley",
"obama",
"obamacare",
"object",
"obligation",
"observe",
"obsolete",
"obvious",
"occasions",
"occur",
"ocean",
"of",
"off",
"offer",
"offered",
"offering",
"office",
"officially",
"often",
"oftentimes",
"oh",
"oil",
"ok",
"okay",
"old",
"on",
"once",
"one",
"ones",
"only",
"open",
"operate",
"opinion",
"opposed",
"opposite",
"or",
"order",
"orders",
"ordierno",
"organ",
"original",
"other",
"others",
"ought",
"our",
"ourself",
"out",
"outside",
"outsider",
"over",
"overrated",
"owe",
"owes",
"own",
"oxygen",
"package",
"pacs",
"page",
"pages",
"paid",
"paint",
"pair",
"paper",
"papers",
"paragraph",
"parent",
"part",
"participation",
"particular",
"parts",
"party",
"pass",
"past",
"patches",
"patents",
"path",
"patients",
"patriots",
"patrol",
"pattern",
"patton",
"paul",
"pay",
"payer",
"paying",
"peanuts",
"pennsylvania",
"people",
"per",
"percent",
"perhaps",
"period",
"perry",
"person",
"pertinent",
"ph",
"phase",
"phones",
"phrase",
"physicists",
"pick",
"picture",
"piece",
"pitch",
"place",
"places",
"plain",
"plan",
"plane",
"planes",
"planet",
"plans",
"plant",
"plants",
"play",
"playing",
"please",
"pledge",
"pledging",
"plenty",
"plural",
"poem",
"point",
"policies",
"policy",
"political",
"politically",
"politician",
"politicians",
"politics",
"poll",
"polls",
"poor",
"poorly",
"populate",
"port",
"portions",
"pose",
"position",
"positive",
"possible",
"possibly",
"post",
"potatoes",
"potential",
"potomac",
"pound",
"pouring",
"poverty",
"powder",
"power",
"practice",
"predictions",
"premiums",
"prepare",
"prepared",
"preparedness",
"present",
"president",
"press",
"pretty",
"price",
"priebus",
"princeton",
"principles",
"print",
"prison",
"prisoners",
"pritzker",
"private",
"pro",
"probable",
"probably",
"problem",
"problems",
"process",
"produce",
"producing",
"product",
"profitable",
"program",
"project",
"proliferation",
"promise",
"promised",
"proper",
"properly",
"property",
"protect",
"protection",
"proud",
"prove",
"proven",
"provide",
"public",
"publicly",
"pull",
"punched",
"puncher",
"pundits",
"push",
"pushing",
"put",
"putin",
"putting",
"qatar",
"quart",
"quarter",
"queens",
"question",
"questions",
"quick",
"quickly",
"quickness",
"quiet",
"quite",
"quitting",
"quotient",
"race",
"radio",
"rail",
"rain",
"raise",
"ran",
"rand",
"range",
"rapists",
"rate",
"rates",
"rather",
"ray",
"reach",
"read",
"ready",
"reagan",
"real",
"realized",
"really",
"reason",
"reasons",
"rebuild",
"rebuilding",
"rebuilt",
"receive",
"received",
"recently",
"reconsider",
"record",
"red",
"reduce",
"referred",
"region",
"rein",
"reince",
"relations",
"relationship",
"relationships",
"remember",
"renegotiate",
"repeal",
"repeat",
"replace",
"replaced",
"reply",
"report",
"reported",
"reporter",
"reporters",
"represent",
"representing",
"republican",
"republicans",
"require",
"respect",
"respected",
"responsible",
"rest",
"result",
"results",
"return",
"rhetoric",
"rich",
"rick",
"rid",
"ride",
"right",
"ring",
"rip",
"ripped",
"ripping",
"rips",
"rise",
"river",
"rnc",
"road",
"roads",
"roadways",
"rock",
"rocket",
"roll",
"romney",
"ronald",
"roof",
"room",
"root",
"rope",
"rose",
"rosie",
"rough",
"round",
"row",
"rub",
"rubio",
"rule",
"run",
"running",
"russia",
"s",
"sad",
"sadly",
"safe",
"said",
"sail",
"salt",
"same",
"san",
"sand",
"sanders",
"santorum",
"sat",
"saudi",
"saudis",
"save",
"saw",
"say",
"saying",
"says",
"scale",
"scary",
"school",
"schools",
"science",
"score",
"scotland",
"sea",
"search",
"season",
"seat",
"second",
"seconds",
"secretary",
"secrets",
"section",
"sections",
"security",
"see",
"seed",
"seem",
"seen",
"segment",
"select",
"self",
"sell",
"selling",
"semi",
"send",
"sending",
"sends",
"sense",
"sent",
"sentence",
"separate",
"serious",
"serve",
"services",
"set",
"settle",
"seven",
"several",
"shall",
"shape",
"share",
"sharp",
"sharper",
"she",
"sheet",
"shell",
"shine",
"ship",
"ships",
"shocked",
"shocking",
"shoe",
"shooting",
"shop",
"shore",
"short",
"should",
"shoulder",
"shouldn't",
"shout",
"show",
"showed",
"side",
"sight",
"sign",
"signals",
"signed",
"signing",
"silent",
"silicon",
"silver",
"similar",
"simple",
"simply",
"simultaneously",
"since",
"sing",
"single",
"sister",
"sit",
"site",
"sites",
"sitting",
"situation",
"six",
"size",
"skill",
"skin",
"sky",
"slave",
"sleep",
"slip",
"slow",
"small",
"smart",
"smarter",
"smartest",
"smell",
"smile",
"snow",
"so",
"social",
"soft",
"soil",
"sold",
"soldier",
"soldiers",
"solution",
"solve",
"some",
"somebody",
"someday",
"something",
"sometimes",
"son",
"song",
"soon",
"sophisticated",
"sorry",
"sort",
"sorts",
"sought",
"soul",
"sound",
"sounds",
"south",
"southern",
"space",
"spanish",
"speak",
"speaking",
"speaks",
"special",
"specifics",
"spectacular",
"speech",
"speeches",
"speed",
"spell",
"spend",
"spending",
"spends",
"spent",
"spigot",
"spirit",
"spoke",
"spot",
"spread",
"spreads",
"spring",
"square",
"stage",
"stand",
"standard",
"standpoint",
"stands",
"stanford",
"star",
"start",
"started",
"starting",
"state",
"statement",
"statements",
"states",
"station",
"statistic",
"statistics",
"stats",
"stay",
"stays",
"stead",
"stealing",
"steam",
"steel",
"step",
"stick",
"still",
"stock",
"stone",
"stood",
"stop",
"store",
"stories",
"storming",
"story",
"straight",
"straighten",
"strange",
"stream",
"street",
"strength",
"strengthen",
"strengthened",
"stretch",
"string",
"strong",
"stronger",
"strongest",
"strongly",
"student",
"study",
"stuff",
"stupid",
"stupidity",
"subcontractors",
"subject",
"substance",
"substantial",
"subtract",
"success",
"successful",
"such",
"sudden",
"suffix",
"sugar",
"suggest",
"suggested",
"suit",
"sum",
"summary",
"summer",
"sun",
"super",
"superstar",
"supply",
"support",
"supporters",
"supporting",
"supposed",
"sure",
"surface",
"surplus",
"surprise",
"sustain",
"swear",
"sweated",
"sweet",
"swim",
"syllable",
"symbol",
"syria",
"system",
"table",
"tail",
"take",
"taken",
"takes",
"taking",
"talent",
"talented",
"talk",
"talked",
"talking",
"talks",
"tall",
"tapped",
"tariff",
"tax",
"teach",
"team",
"tear",
"ted",
"teeth",
"television",
"tell",
"telling",
"tells",
"temperature",
"ten",
"tennessee",
"tens",
"term",
"terminate",
"terms",
"terrible",
"terrific",
"terrorism",
"terrorists",
"test",
"than",
"thank",
"thanks",
"that",
"that's",
"the",
"their",
"them",
"themselves",
"then",
"there",
"therefore",
"these",
"they",
"they'd",
"thick",
"thin",
"thing",
"things",
"think",
"thinking",
"thinks",
"third",
"this",
"those",
"though",
"thought",
"thousand",
"thousands",
"three",
"thrilled",
"thriving",
"through",
"throw",
"thrown",
"thus",
"tie",
"tiffany",
"time",
"times",
"tiny",
"tire",
"tired",
"to",
"toads",
"today",
"together",
"tokyo",
"told",
"tom",
"tone",
"tonight",
"too",
"took",
"tool",
"top",
"total",
"totally",
"touch",
"tough",
"tougher",
"toward",
"tower",
"town",
"track",
"tractor",
"trade",
"trader",
"train",
"traitor",
"transaction",
"travel",
"treated",
"treating",
"tree",
"tremendous",
"triangle",
"trillion",
"trip",
"trouble",
"truck",
"trucks",
"true",
"truly",
"trump",
"trust",
"truth",
"try",
"trying",
"tube",
"tubes",
"tunnels",
"turn",
"turned",
"turning",
"turnout",
"twenty",
"two",
"type",
"ultimately",
"unable",
"unbelievable",
"unbelievably",
"under",
"understand",
"unemployment",
"unfortunately",
"unions",
"unit",
"united",
"unless",
"unsalvageable",
"until",
"up",
"upset",
"us",
"use",
"used",
"useless",
"uses",
"using",
"usual",
"valley",
"valuable",
"value",
"vanessa",
"various",
"vary",
"vehicles",
"ventured",
"verb",
"very",
"vets",
"vibrant",
"vice",
"vicious",
"victories",
"victory",
"view",
"village",
"violence",
"virtually",
"visit",
"vladimir",
"voice",
"vote",
"voters",
"votes",
"vowel",
"wait",
"waiting",
"walk",
"wall",
"walls",
"want",
"wanted",
"wants",
"war",
"warm",
"was",
"wash",
"washington",
"wasn't",
"waste",
"watch",
"watched",
"watching",
"water",
"wave",
"way",
"ways",
"we",
"we'd",
"weak",
"weaker",
"weakness",
"wealth",
"weapons",
"wear",
"weather",
"web",
"wedding",
"week",
"weeks",
"weight",
"welfare",
"well",
"went",
"were",
"weren't",
"west",
"wharton",
"what",
"whatever",
"whatsoever",
"wheel",
"when",
"whenever",
"where",
"whether",
"which",
"while",
"white",
"who",
"whoa",
"whoever",
"whole",
"whose",
"why",
"wide",
"wife",
"wild",
"will",
"win",
"wind",
"window",
"wing",
"wins",
"winter",
"wire",
"wires",
"wish",
"with",
"within",
"without",
"woman",
"women",
"won't",
"wonder",
"wonderful",
"wood",
"word",
"words",
"work",
"worked",
"workforce",
"working",
"works",
"world",
"worse",
"worst",
"worth",
"would",
"wouldn't",
"wounded",
"wow",
"write",
"writes",
"written",
"wrong",
"wrote",
"yale",
"yard",
"yeah",
"year",
"years",
"yellow",
"yemen",
"yen",
"yes",
"yesterday",
"yet",
"york",
"you",
"you'd",
"you'll",
"young",
"your",
"yourself",
"zero",
}
|
cshaley/TrumpScript
|
src/trumpscript/allowed_words.py
|
Python
|
mit
| 21,685
|
[
"VisIt"
] |
9aed0d14b20cac21c642ace4adbd214d8f37fe834165ed4ec9c739054c1b8782
|
# normalised correlation test after gaussian smoothing
# gaussian radii: 20, 40 and 60
# thresholds: dBZ = 15, 25, 30
#
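# For reference, the normalised correlation computed below via w2.corr(k) is
# assumed to be the standard Pearson form over the two arrays a and b:
#     corr = sum((a - mean(a)) * (b - mean(b)))
#            / sqrt(sum((a - mean(a))**2) * sum((b - mean(b))**2))
# (an assumption from the method name; see armor.pattern for the actual code)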
import copy
from armor import pattern
from armor import objects3 as ob
from armor.tests.roughwork20131106 import construct3by3
#from armor.geometry import frames as fr
import numpy as np
import numpy.ma as ma
import pylab
pylab.ion()
#pylab.draw()
kongrey = ob.kongrey
wrf = ob.kongreywrf2
wrf.fix()
dataTime = '20130828.1500'
outputFolder = pattern.a.outputFolder
k = kongrey(dataTime)[0]
#k.load()
#k.setThreshold(0)
#k.getCentroid()
dataTimeStart = k.timeDiff(hours=-6)
dataTimeEnd = k.timeDiff(hours= 6)
dataTimes = [w.dataTime for w in wrf]
dataTimes = [w for w in dataTimes if w >= dataTimeStart and w <= dataTimeEnd]
dataTimes = sorted(list(set(dataTimes))) # '20130828.0900' to '20130828.2100'
#for T in dataTimes:
# wrf.load(T)
#wrf.cutUnloaded()
#wrf.setThreshold(0)
wrf.list = [w for w in wrf if w.dataTime in dataTimes]
outputString = ''
correlationsAll=[]
makeImages = True
saveImages = False
count = -1
for thres in [15, 25, 30]:
wrf.listTemp = copy.copy(wrf.list)
for sigma in [60, 40, 20]:
outputString = ''
count +=1
#if count <1:
# continue
# LOAD k, smooth by gaussian , and get threshold
k.load()
k.setThreshold(0)
k.matrix0 = k.matrix.copy()
k.getCentroid()
k.matrix = k.gaussianFilter(sigma).matrix
k.matrix = 100.* (k.matrix>=thres)
k.matrix.mask = np.zeros(k.matrix.shape)
#k.vmax=2
#k.vmin=-2
#k.makeImage(closeAll=True)
#pylab.draw()
correlations = []
for w in wrf.listTemp:
#try:
# LOAD w, smooth by gaussian , and get threshold
w.load()
w.setThreshold(0)
w.getCentroid()
w1 = w.gaussianFilter(sigma)
topRowName = w.name + ', gaussian(' + str(sigma) + ') and ' + k.name
topRow = ma.hstack([w.matrix, w1.matrix, k.matrix0])
#k.load()
#k.setThreshold(0)
#topRow = ma.hstack([w.matrix, w1.matrix, k.matrix])
w1.matrix = 100.*(w1.matrix>=thres)
w1.matrix.mask = np.zeros(w1.matrix.shape)
#w1.vmax = 2
#w1.vmin =-2
#w.makeImage(closeAll=True)
#pylab.draw()
#print "w.matrix.shape, w.matrix.mask.shape", w.matrix.shape, w.matrix.mask.shape
try:
############################################
# punchlines
w2 = w1.momentNormalise(k)
corr = w2.corr(k)
#w2.vmax = 2
#w2.vmin =-2
w2.matrix = ma.hstack([w1.matrix, w2.matrix, k.matrix])
w2.name = w.name + ', normalised, and ' + k.name + '\nnormalised correlation: ' + str(corr)
w2.matrix = ma.vstack([w2.matrix, topRow])
w2.name = topRowName + '\n' + "bottom row:" + w2.name
w2.imagePath = '/home/k/ARMOR/python/testing/' + w.name + '_' + k.name + '_sigma' + str(sigma) + '_thres' + str(thres) + '.png'
w2.vmin= -20.
w2.vmax = 100.
if saveImages:
w2.saveImage()
if makeImages:
w2.makeImage(closeAll=True)
pylab.draw()
#
############################################
#except IndexError:
except SyntaxError:
corr = -999
correlations.append({
'sigma': sigma,
'thres': thres,
'k' : k.name,
'w' : w.name,
'corr' : corr,
})
print k.name, w.name, 'sigma:', sigma, 'thres:', thres, 'corr:', corr
w.matrix = 0 # unload!!
#except:
# print "Error!! - "
# print "k.name, w.name, sigma, thres"
# print k.name, w.name, sigma, thres
# sort, get the best matches
correlations.sort(key=lambda v: v['corr'], reverse=True)
# output
outputString += '\n\n................................................\n'
outputString += 'Top Matches for\n'
outputString += ' sigma, thres: ' + str(sigma) + ', ' + str(thres) + '\n'
for entry in correlations[:20]:
outputString += ' k, w, corr: ' + '\t' + str(entry['k']) + \
'\t' + str(entry['w']) + \
'\t' + str(entry['corr']) + '\n'
print outputString
open('gaussianSmoothNormalisedCorrelationTest.log.txt', 'a').write(outputString)
correlationsAll.append(correlations)
candidates = correlations[: len(correlations)//2] # taking the top half going down the scale
candidates = [entry['w'] for entry in candidates] # taking the w.name only
wrf.listTemp = [v for v in wrf.listTemp if v.name in candidates] # trim the list
open('gaussianSmoothNormalisedCorrelationTest.log.txt', 'a').write('\n\n\n====\nAll Correlations'+\
str(correlationsAll))
|
yaukwankiu/armor
|
tests/gaussianSmoothedNormalisedCorrelationTest.py
|
Python
|
cc0-1.0
| 5,727
|
[
"Gaussian"
] |
4d26fb95a9fe527f2ea321c5e5f5753fc6ede40e7821f1e41794ca6eebd10767
|
"""
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
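# Per-component covariance parameter counts for d-dimensional data (here
# d=4): spherical = 1, diag = d = 4, full = d*(d+1)/2 = 10; 'tied' shares a
# single full covariance matrix across all components, which explains its
# intermediate flexibility.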
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers // 2, index + 1)  # integer division keeps this valid on Python 3
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| 000ubird/HighResolution | src/samples/plot_gmm_classifier.py | Python | mit | 3,918 | ["Gaussian"] | dbc40730f0419c3f158e0e08df1a8e68edadb6fac582505df60d5afdb7866f7e |
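The example above targets an old scikit-learn: sklearn.cross_validation, sklearn.externals.six and sklearn.mixture.GMM have since been removed. A hedged sketch of the equivalent setup under current scikit-learn (GaussianMixture and model_selection), shown for orientation rather than as a drop-in port:

import numpy as np
from sklearn import datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.mixture import GaussianMixture

iris = datasets.load_iris()
# StratifiedKFold now splits via .split(X, y); take the first fold, as above.
train_index, test_index = next(iter(
    StratifiedKFold(n_splits=4).split(iris.data, iris.target)))
X_train, y_train = iris.data[train_index], iris.target[train_index]
n_classes = len(np.unique(y_train))

# GMM(covariance_type=...) became GaussianMixture(covariance_type=...).
classifiers = {
    covar_type: GaussianMixture(n_components=n_classes,
                                covariance_type=covar_type, max_iter=20)
    for covar_type in ('spherical', 'diag', 'tied', 'full')}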
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import six
from .biology import BiologyType
from .cell import Cell
from .dataObject import DatatypeProperty, ObjectProperty
__all__ = ['Connection']
class SynapseType:
Chemical = 'send'
GapJunction = 'gapJunction'
class Termination:
Neuron = 'neuron'
Muscle = 'muscle'
class Connection(BiologyType):
class_context = BiologyType.class_context
post_cell = ObjectProperty(value_type=Cell)
''' The post-synaptic cell '''
pre_cell = ObjectProperty(value_type=Cell)
''' The pre-synaptic cell '''
number = DatatypeProperty()
''' The weight of the connection '''
synclass = DatatypeProperty()
''' The kind of Neurotransmitter (if any) sent between `pre_cell` and `post_cell` '''
syntype = DatatypeProperty()
''' The kind of synaptic connection. 'gapJunction' indicates a gap junction and 'send' a chemical synapse '''
termination = DatatypeProperty()
''' Where the connection terminates. Inferred from type of post_cell at initialization '''
def __init__(self,
pre_cell=None,
post_cell=None,
number=None,
syntype=None,
synclass=None,
termination=None,
**kwargs):
super(Connection, self).__init__(pre_cell=pre_cell,
post_cell=post_cell,
number=number,
syntype=syntype,
synclass=synclass,
**kwargs)
if isinstance(termination, six.string_types):
termination = termination.lower()
if termination in ('neuron', Termination.Neuron):
self.termination(Termination.Neuron)
elif termination in ('muscle', Termination.Muscle):
self.termination(Termination.Muscle)
if isinstance(syntype, six.string_types):
syntype = syntype.lower()
if syntype in ('send', SynapseType.Chemical):
self.syntype(SynapseType.Chemical)
elif syntype in ('gapjunction', SynapseType.GapJunction):
self.syntype(SynapseType.GapJunction)
def defined_augment(self):
return (self.pre_cell.has_defined_value() and
self.post_cell.has_defined_value() and
self.syntype.has_defined_value())
def identifier_augment(self):
data = (self.pre_cell,
self.post_cell,
self.syntype)
data = tuple(x.defined_values[0].identifier.n3() for x in data)
data = "".join(data)
return self.make_identifier(data)
def __str__(self):
nom = []
if self.pre_cell.has_defined_value():
nom.append(('pre_cell', self.pre_cell.values[0]))
if self.post_cell.has_defined_value():
nom.append(('post_cell', self.post_cell.values[0]))
if self.syntype.has_defined_value():
nom.append(('syntype', self.syntype.values[0]))
if self.termination.has_defined_value():
nom.append(('termination', self.termination.values[0]))
if self.number.has_defined_value():
nom.append(('number', self.number.values[0]))
if self.synclass.has_defined_value():
nom.append(('synclass', self.synclass.values[0]))
if len(nom) == 0:
return super(Connection, self).__str__()
else:
return 'Connection(' + \
', '.join('{}={}'.format(n[0], n[1]) for n in nom) + \
')'
__yarom_mapped_classes__ = (Connection,)
| gsarma/PyOpenWorm | PyOpenWorm/connection.py | Python | mit | 3,807 | ["NEURON"] | f325665499ae6d380832f0ffce238b7a89db454facab0efcd2f8da7a1d4df474 |
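A hedged usage sketch (not part of the file above): __init__ lower-cases the syntype and termination strings, so 'Send' and 'send' both normalise to SynapseType.Chemical. The Cell constructor arguments are an assumption here, and depending on the PyOpenWorm version a PyOpenWorm.connect() call may be required first:

from PyOpenWorm.cell import Cell
from PyOpenWorm.connection import Connection

conn = Connection(pre_cell=Cell(name='AVAL'),   # Cell signature assumed
                  post_cell=Cell(name='AVAR'),
                  syntype='Send',        # normalised to SynapseType.Chemical
                  termination='neuron')  # normalised to Termination.Neuron
print(conn)  # Connection(pre_cell=..., post_cell=..., syntype=send, ...)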
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module containing class to create an ion
"""
import re
from copy import deepcopy
import numpy as np
from monty.json import MSONable
from pymatgen.core.composition import Composition
from pymatgen.util.string import formula_double_format
class Ion(Composition, MSONable):
"""
Basic ion object. It is just a Composition object with an additional
variable to store charge.
The net charge can either be represented as Mn++, or Mn+2, or Mn[2+].
Note the order of the sign and magnitude in each representation.
"""
def __init__(self, composition, charge=0.0, properties=None):
"""
Flexible Ion construction, similar to Composition.
For more information, please see pymatgen.core.Composition
"""
super().__init__(composition)
self._charge = charge
@classmethod
def from_formula(cls, formula: str) -> 'Ion':
"""
Creates Ion from formula.
:param formula:
:return: Ion
"""
charge = 0.0
f = formula
m = re.search(r"\[([^\[\]]+)\]", f)
if m:
m_chg = re.search(r"([\.\d]*)([+-])", m.group(1))
if m_chg:
if m_chg.group(1) != "":
charge += float(m_chg.group(1)) * \
(float(m_chg.group(2) + "1"))
else:
charge += float(m_chg.group(2) + "1")
f = f.replace(m.group(), "", 1)
m = re.search(r"\(aq\)", f)
if m:
f = f.replace(m.group(), "", 1)
for m_chg in re.finditer(r"([+-])([\.\d]*)", f):
sign = m_chg.group(1)
sgn = float(str(sign + "1"))
if m_chg.group(2).strip() != "":
charge += float(m_chg.group(2)) * sgn
else:
charge += sgn
f = f.replace(m_chg.group(), "", 1)
composition = Composition(f)
return cls(composition, charge)
@property
def formula(self):
"""
Returns a formula string, with elements sorted by electronegativity,
e.g., Li4 Fe4 P4 O16.
"""
formula = super().formula
chg_str = ""
if self.charge > 0:
chg_str = " +" + formula_double_format(self.charge, False)
elif self.charge < 0:
chg_str = " " + formula_double_format(self.charge, False)
return formula + chg_str
@property
def anonymized_formula(self):
"""
An anonymized formula. Appends charge to the end
of anonymized composition
"""
anon_formula = super().anonymized_formula
chg = self._charge
chg_str = ""
if chg > 0:
chg_str += ("{}{}".format('+', str(int(chg))))
elif chg < 0:
chg_str += ("{}{}".format('-', str(int(np.abs(chg)))))
return anon_formula + chg_str
@property
def reduced_formula(self):
"""
Returns a reduced formula string with appended charge.
"""
reduced_formula = super().reduced_formula
charge = self._charge / self.get_reduced_composition_and_factor()[1]
if charge > 0:
if abs(charge) == 1:
chg_str = "[+]"
else:
chg_str = "[" + formula_double_format(charge, False) + "+]"
elif charge < 0:
if abs(charge) == 1:
chg_str = "[-]"
else:
chg_str = "[{}-]".format(formula_double_format(abs(charge),
False))
else:
chg_str = "(aq)"
return reduced_formula + chg_str
@property
def alphabetical_formula(self):
"""
Returns a reduced formula string with appended charge
"""
alph_formula = super().alphabetical_formula
chg_str = ""
if self.charge > 0:
chg_str = " +" + formula_double_format(self.charge, False)
elif self.charge < 0:
chg_str = " " + formula_double_format(self.charge, False)
return alph_formula + chg_str
@property
def charge(self):
"""
Charge of the ion
"""
return self._charge
def as_dict(self):
"""
Returns:
dict with composition, as well as charge
"""
d = super().as_dict()
d['charge'] = self.charge
return d
@classmethod
def from_dict(cls, d):
"""
Generates an ion object from a dict created by as_dict().
Args:
d:
{symbol: amount} dict, plus a 'charge' key.
"""
d_copy = deepcopy(d)  # avoid mutating the caller's dict and shadowing input()
charge = d_copy.pop('charge')
composition = Composition(d_copy)
return Ion(composition, charge)
@property
def to_reduced_dict(self):
"""
Returns:
dict with element symbol and reduced amount e.g.,
{"Fe": 2.0, "O":3.0}.
"""
d = self.composition.to_reduced_dict
d['charge'] = self.charge
return d
@property
def composition(self):
"""Composition of ion."""
return Composition(self._data)
def __eq__(self, other):
if self.composition != other.composition:
return False
if self.charge != other.charge:
return False
return True
def __add__(self, other):
"""
Addition of two ions.
"""
new_composition = self.composition + other.composition
new_charge = self.charge + other.charge
return Ion(new_composition, new_charge)
def __sub__(self, other):
"""
Subtraction of two ions
"""
new_composition = self.composition - other.composition
new_charge = self.charge - other.charge
return Ion(new_composition, new_charge)
def __mul__(self, other):
"""
Multiplication of an Ion with a factor
"""
new_composition = self.composition * other
new_charge = self.charge * other
return Ion(new_composition, new_charge)
def __hash__(self):
return hash((self.composition, self.charge))
def __str__(self):
return self.formula
def __repr__(self):
return "Ion: " + self.formula
| gVallverdu/pymatgen | pymatgen/core/ion.py | Python | mit | 6,391 | ["pymatgen"] | 2860e0952b09cf04d400f71fe3f5a9953b2b6531a494e7fb2cf7b36bd439037b |
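A hedged sketch of the charge notations from_formula accepts, following the class docstring above (exact printed output can vary across pymatgen versions):

from pymatgen.core.ion import Ion

for s in ('Mn[2+]', 'Mn+2', 'Mn++', 'SO4[2-]', 'NaCl(aq)'):
    ion = Ion.from_formula(s)
    print(s, '->', ion.reduced_formula, 'charge =', ion.charge)
# 'Mn[2+]', 'Mn+2' and 'Mn++' all parse to charge 2.0; a neutral ion such
# as 'NaCl(aq)' gets the '(aq)' suffix back from reduced_formula.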
"""
Various general utility functions.
"""
import math
import os.path
import warnings
import scipy.ndimage  # bare "import scipy" would not load the ndimage subpackage used in log_kernel
import numpy as np
from medpy.io import load
import skimage
from skimage import io, transform, morphology, filters
from sklearn import cluster
def preprocess_image(image_path, mask_path=None):
"""Preprocess an image, optionally using a mask.
:param image_path: path to the image.
:param mask_path: path to the mask to use (optional).
"""
img = load_image(image_path)
msk = None
if mask_path is not None:
msk = load_mask(mask_path)
msk = resize_mask_to_image(msk, img.shape)
img[msk == 0] = 0
img[msk != 0] = normalise_image(img[msk != 0])
else:
img = normalise_image(img)
return img, msk
def load_image(image_path):
"""Load an image from file
:param image_path: path to the image on disk.
:returns: ndarray -- the image as an ndarray
"""
name, ext = os.path.splitext(image_path)
if ext == ".dcm":
img = load_synthetic_mammogram(image_path)
else:
img = load_real_mammogram(image_path)
return img
def load_synthetic_mammogram(image_path):
"""Load a synthetic mammogram from file
:param image_path: path to the image on disk.
:returns: ndarray -- the image as an ndarray
"""
img, image_header = load(image_path)
img = np.invert(img)
img = img.astype('float64')
img = skimage.transform.pyramid_expand(img, 2)
return img
def load_real_mammogram(image_path):
"""Load a real mammogram from file
:param image_path: path to the image on disk.
:returns: ndarray -- the image as an ndarray
"""
img = io.imread(image_path, as_grey=True)
img = skimage.img_as_float(img)
return img
def load_mask(mask_path):
"""Load a binary mask from file
:param mask_path: path to the mask on disk.
:returns: ndarray -- the mask as an ndarray
"""
msk = io.imread(mask_path, as_grey=True)
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
msk = skimage.img_as_uint(msk) # cast from uint32 to uint16
msk = skimage.img_as_float(msk) # cast from uint16 to float
except Warning:
# Warning precision loss is ok. We only need binary info for mask
pass
msk = erode_mask(msk, kernel_size=35)
return msk
def resize_mask_to_image(msk, img_shape):
"""Resize a binary mask to match the size of the image.
:param msk: ndarray representing the mask
:param img_shape: shape to resize the mask to.
:returns: ndarray -- the resized mask as an ndarray
"""
msk = transform.resize(msk, img_shape)
msk[msk > 0] = 1
msk[msk == 0] = 0
return msk
def normalise_image(img):
"""Normalise an image
:returns: ndarray -- the normalised image
"""
return (img-img.min())/(img.max()-img.min())
def binary_image(img, threshold):
"""Create a binary image using a threshold.
Everything greater than the threshold is set to 1, while everything less
than is set to zero.
:param img: the image to threshold
:param threshold: the value to threshold the image with
:returns: ndarray -- int64 array representing the binary version of the
image
"""
binary_image = np.zeros(img.shape, dtype='uint8')
binary_image[img > threshold] = 1
return binary_image
def erode_mask(mask, kernel_func=morphology.disk, kernel_size=30):
"""Erode a mask using a kernel
Uses binary_erosion to erode the edge of a mask.
:param mask: the mask to erode
:param kernel_func: the function used to generate the kernel
(default: disk)
:param kernel_size: the size of the kernel to use (default: 30)
"""
eroded_mask = np.zeros(mask.shape)
kernel = kernel_func(kernel_size)
morphology.binary_erosion(mask, kernel, out=eroded_mask)
return eroded_mask
def to_polar_coordinates(x, y):
"""Convert the 2D pixel coordinates to polar coordinates
:param x: x coordinate of the point
:param y: y coordinate of the point
:returns: tuple -- (r,phi) of the point as polar coordinates
"""
theta = math.atan2(y, x)
if theta < 0:
theta = theta + 2 * math.pi
return math.hypot(x, y), theta
def transform_2d(f, grid, *args):
"""Apply a function to every element in a 2d array
:param f: function to apply
:param grid: array to apply the function too. Must be 2D.
:param args: additional arguments to the function
:returns: ndarray - of transformed data
"""
out_grid = []
for row in grid:
out_row = []
for value in row:
out_value = f(value, *args)
out_row.append(out_value)
out_row = np.vstack(out_row)
out_grid.append(out_row)
return np.hstack(out_grid)
def vectorize_array(f, array, *args):
""" Helper function to vectorize across the rows of a 2D numpy array
:param f: function to vectorize
:param array: 2D array to iterate over.
:param args: list of arguments to pass to the function f
:returns: ndarray of the results of applying the function to each row.
"""
return np.array([f(row, *args) for row in array])
def gaussian_kernel(size, sigma=3):
""" Make gaussian kernel.
Code based on implementation by Andrew Giessel
https://gist.github.com/andrewgiessel/4635563
Accessed: 14/03/2015
"""
fwhm = 2.355*sigma
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = size // 2
return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)
def log_kernel(sigma):
""" Make a LoG kernel
:param sigma: sigma of the Gaussian to use
:returns: ndarray containing a LoG kernel
"""
size = sigma * 6.0 / 2
g = gaussian_kernel(size+1, sigma)
log = scipy.ndimage.filters.laplace(g, mode='wrap')
# remove the rubbish around the edge
log = log[1:-1, 1:-1]
return log
def make_mask(img):
"""Make a mask file from an image
Uses Otsu's thresholding technique. No correction is currently made for the
pectoral muscle.
:param img: the image to make a mask for
:returns: a binary ndarray the same size as the original image
"""
thresh = filters.threshold_otsu(img)
msk = np.zeros(img.shape)
msk[img > thresh] = 1
msk = skimage.img_as_uint(msk)
return msk
def cluster_image(img, n_clusters=5):
"""Segement an image into n different clusters.
:param img: the image to cluster
:param n_clusters: the number of clusters to use.
:returns: array -- array specifying the class associated with each pixel
"""
kmeans = cluster.KMeans(n_clusters=n_clusters, n_init=5)
X = img.reshape(img.size, 1)
labels = kmeans.fit_predict(X)
labels = labels.reshape(img.shape)
return labels
def clusters_from_labels(img, labels):
"""Create clusters of image pixels from labels generated by cluster_image
:param img: the image the clusters were generated from.
:param labels: the labels for each pixel in an image.
:returns: ndarray -- containing pixels corresponding to each cluster.
"""
clusters = []
for i in np.unique(labels):
img_cluster = img.copy()
img_cluster[labels != i] = 0
clusters.append(img_cluster)
return np.array(clusters)
def sort_clusters_by_density(clusters):
"""Sort an array of clusters by the most dense (most intense).
:param clusters: the array of clustered image pixels
:returns: ndarray -- array of clusters sorted by the least to most dense.
"""
totals = []
for c in clusters:
if np.count_nonzero(c) > 0:
totals.append(np.mean(c[np.nonzero(c)]))
else:
totals.append(0)
return clusters[np.argsort(totals)]
| samueljackson92/major-project | src/mia/utils.py | Python | mit | 7,885 | ["Gaussian"] | 0db775b5ee285cacaf266c37470f2ee7419845f383c5e2c249b6e3ccd5c6998b |
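A hedged usage sketch of the kernel helpers defined above (the mia.utils import path is assumed from the file location):

import numpy as np
import scipy.ndimage

from mia.utils import gaussian_kernel, log_kernel  # import path assumed

g = gaussian_kernel(31, sigma=3)   # 31x31, unnormalised, peak of 1.0 at centre
assert abs(g[15, 15] - 1.0) < 1e-9
log = log_kernel(3.0)              # small square Laplacian-of-Gaussian kernel
img = np.random.rand(64, 64)
response = scipy.ndimage.convolve(img, log)  # strong response at blob-like spots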
"""Test generation helpers
Intended to functionalize common tasks when working with the pytest_generate_tests hook.
When running a test, it is quite often the case that multiple parameters need to be passed
to a single test. An example of this would be the need to run a Provider Add test against
multiple providers. We will assume that the providers are stored in the yaml under a common
structure like so:
.. code-block:: yaml
providers:
prov_1:
name: test
ip: 10.0.0.1
test_vm: abc1
prov_2:
name: test2
ip: 10.0.0.2
test_vm: abc2
Our test requires that we have a Provider Object and as an example, the 'test_vm' field of the
object. Let's assume a test prototype like so::
test_provider_add(provider_obj, test_vm):
In this case we require the test to be run twice, once for prov_1 and then again for prov_2.
We are going to use the generate function to help us provide parameters to pass to
``pytest_generate_tests()``. ``pytest_generate_tests()`` requires three pieces of
information, ``argnames``, ``argvalues`` and an ``idlist``. ``argnames`` turns into the
names we use for fixtures. In this case, ``provider_obj`` and ``test_vm``.
``argvalues`` becomes the place where the ``provider_obj`` and ``test_vm``
items are stored. Each element of ``argvalues`` is a list containing a value for both
``provider_obj`` and ``test_vm``. Thus, taking an element from ``argvalues``
gives us the values to unpack to make up one test. An example is below, where we assume
that a provider object is obtained via the ``Provider`` class, and the ``test_vm`` name
is taken straight from the yaml.
===== =============== =================
~ provider_obj test_vm
===== =============== =================
prov1 Provider(prov1) abc1
prov2 Provider(prov2) abc2
===== =============== =================
This is analogous to the following layout:
========= =============== ===============
~ argnames[0] argnames[1]
========= =============== ===============
idlist[0] argvalues[0][0] argvalues[0][1]
idlist[1] argvalues[1][0] argvalues[1][1]
========= =============== ===============
This could be generated like so:
.. code-block:: python
def gen_providers:
argnames = ['provider_obj', 'test_vm']
argvalues = []
idlist = []
for provider in yaml['providers']:
idlist.append(provider)
argvalues.append([
Provider(yaml['providers'][provider]['name']),
yaml['providers'][provider]['test_vm'])
])
return argnames, argvalues, idlist
This is then used with pytest_generate_tests like so::
pytest_generate_tests(gen_providers)
Additionally, py.test joins the values of ``idlist`` with dashes to generate a unique id for this
test, falling back to joining ``argnames`` with dashes if ``idlist`` is not set. This is the value
seen in square brackets in a test report on parametrized tests.
More information on ``parametrize`` can be found in pytest's documentation:
* https://pytest.org/latest/parametrize.html#_pytest.python.Metafunc.parametrize
"""
import pytest
from cfme.common.provider import BaseProvider
from cfme.infrastructure.config_management import get_config_manager_from_config
from cfme.infrastructure.pxe import get_pxe_server_from_config
from cfme.roles import group_data
from utils.conf import cfme_data
from utils.log import logger
from utils.providers import ProviderFilter, list_providers
def _param_check(metafunc, argnames, argvalues):
"""Helper function to check if parametrizing is necessary
* If no argnames were specified, parametrization is unnecessary.
* If argvalues were generated, parametrization is necessary.
* If argnames were specified, but no values were generated, the test cannot run successfully,
and will be uncollected using the :py:mod:`markers.uncollect` mark.
See usage in :py:func:`parametrize`
Args:
metafunc: metafunc objects from pytest_generate_tests
argnames: argnames list for use in metafunc.parametrize
argvalues: argvalues list for use in metafunc.parametrize
Returns:
* ``True`` if this test should be parametrized
* ``False`` if it shouldn't be parametrized
* ``None`` if the test will be uncollected
"""
# If no parametrized args were named, don't parametrize
if not argnames:
return False
# If parametrized args were named and values were generated, parametrize
elif any(argvalues):
return True
# If parametrized args were named, but no values were generated, mark this test to be
# removed from the test collection. Otherwise, py.test will try to find values for the
# items in argnames by looking in its fixture pool, which will almost certainly fail.
else:
# module and class are optional, but function isn't
modname = getattr(metafunc.module, '__name__', None)
classname = getattr(metafunc.cls, '__name__', None)
funcname = metafunc.function.__name__
test_name = '.'.join(filter(None, (modname, classname, funcname)))
uncollect_msg = 'Parametrization for {} yielded no values,'\
' marked for uncollection'.format(test_name)
logger.warning(uncollect_msg)
# apply the mark
pytest.mark.uncollect(reason=uncollect_msg)(metafunc.function)
def parametrize(metafunc, argnames, argvalues, *args, **kwargs):
"""parametrize wrapper that calls :py:func:`_param_check`, and only parametrizes when needed
This can be used in any place where conditional parametrization is used.
"""
if _param_check(metafunc, argnames, argvalues):
metafunc.parametrize(argnames, argvalues, *args, **kwargs)
# if param check failed and the test was supposed to be parametrized around a provider
elif 'provider' in metafunc.fixturenames:
try:
# hack to pass through in case of a failed param_check
# where it sets a custom message
metafunc.function.uncollect
except AttributeError:
pytest.mark.uncollect(
reason="provider was not parametrized did you forget --use-provider?"
)(metafunc.function)
def generate(*args, **kwargs):
"""Functional handler for inline pytest_generate_tests definition
Args:
gen_func: Test generator function, expected to return argnames, argvalues, and an idlist
suitable for use with pytest's parametrize method in pytest_generate_tests hooks
indirect: Optional keyword argument. If seen, it will be removed from the kwargs
passed to gen_func and used in the wrapped pytest parametrize call
scope: Optional keyword argument. If seen, it will be removed from the kwargs
passed to gen_func and used in the wrapped pytest parametrize call
filter_unused: Optional keyword argument. If True (the default), parametrized tests will
be inspected, and only argnames matching fixturenames will be used to parametrize the
test. If seen, it will be removed from the kwargs passed to gen_func.
*args: Additional positional arguments which will be passed to ``gen_func``
**kwargs: Additional keyword arguments which will be passed to ``gen_func``
Usage:
# Abstract example:
pytest_generate_tests = testgen.generate(arg1, arg2, kwarg1='a')
# Concrete example using all infrastructure providers and module scope
pytest_generate_tests = testgen.generate([InfraProvider], scope="module")
# Another concrete example using only VMware and SCVMM providers with 'retire' flag
pf = ProviderFilter(
classes=[VMwareProvider, SCVMMProvider], required_flags=['retire'])
pytest_generate_tests = testgen.generate(
gen_func=testgen.providers, filters=[pf], scope="module")
Note:
``filter_unused`` is helpful, in that you don't have to accept all of the args in argnames
in every test in the module. However, if all tests don't share one common parametrized
argname, py.test may not have enough information to properly organize tests beyond the
'function' scope. Thus, when parametrizing in the module scope, it's a good idea to include
at least one common argname in every test signature to give pytest a clue in sorting tests.
"""
# Pull out/default kwargs for this function and parametrize; any args and kwargs that are not
# pulled out here will be passed into gen_func within pytest_generate_tests below
scope = kwargs.pop('scope', 'function')
indirect = kwargs.pop('indirect', False)
filter_unused = kwargs.pop('filter_unused', True)
gen_func = kwargs.pop('gen_func', providers_by_class)
def fixture_filter(metafunc, argnames, argvalues):
"""Filter fixtures based on fixturenames in the function represented by ``metafunc``"""
# Identify indices of matches between argnames and fixturenames
keep_index = [e[0] for e in enumerate(argnames) if e[1] in metafunc.fixturenames]
# Keep items at indices in keep_index
def f(l):
return [e[1] for e in enumerate(l) if e[0] in keep_index]
# Generate the new values
argnames = f(argnames)
argvalues = [f(av) for av in argvalues]  # a list on both Python 2 and 3 (map is lazy on 3)
return argnames, argvalues
# If parametrize doesn't get you what you need, steal this and modify as needed
def pytest_generate_tests(metafunc):
# Pass through of args and kwargs
argnames, argvalues, idlist = gen_func(metafunc, *args, **kwargs)
# Filter out argnames that aren't requested on the metafunc test item, so not all tests
# need all fixtures to run, and tests not using gen_func's fixtures aren't parametrized.
if filter_unused:
argnames, argvalues = fixture_filter(metafunc, argnames, argvalues)
# See if we have to parametrize at all after filtering
parametrize(metafunc, argnames, argvalues, indirect=indirect, ids=idlist, scope=scope)
return pytest_generate_tests
def providers(metafunc, filters=None):
""" Gets providers based on given (+ global) filters
Note:
Using the default 'function' scope, each test will be run individually for each provider
before moving on to the next test. To group all tests related to single provider together,
parametrize tests in the 'module' scope.
Note:
testgen for providers now requires the usage of test_flags for collection to work.
Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
for more details.
"""
filters = filters or []
argnames = []
argvalues = []
idlist = []
# Obtains the test's flags in form of a ProviderFilter
meta = getattr(metafunc.function, 'meta', None)
test_flag_str = getattr(meta, 'kwargs', {}).get('from_docs', {}).get('test_flag')
if test_flag_str:
test_flags = test_flag_str.split(',')
flags_filter = ProviderFilter(required_flags=test_flags)
filters = filters + [flags_filter]
for provider in list_providers(filters):
argvalues.append([provider])
# Use the provider key for idlist, helps with readable parametrized test output
idlist.append(provider.key)
# Add provider to argnames if missing
if 'provider' in metafunc.fixturenames and 'provider' not in argnames:
metafunc.function = pytest.mark.uses_testgen()(metafunc.function)
argnames.append('provider')
if metafunc.config.getoption('sauce'):
break
return argnames, argvalues, idlist
def providers_by_class(metafunc, classes, required_fields=None):
""" Gets providers by their class
Args:
metafunc: Passed in by pytest
classes: List of classes to fetch
required_fields: See :py:class:`cfme.utils.provider.ProviderFilter`
Usage:
# In the function itself
def pytest_generate_tests(metafunc):
argnames, argvalues, idlist = testgen.providers_by_class(
[GCEProvider, AzureProvider], required_fields=['provisioning']
)
metafunc.parametrize(argnames, argvalues, ids=idlist, scope='module')
# Using the parametrize wrapper
pytest_generate_tests = testgen.generate([GCEProvider], scope='module')
"""
pf = ProviderFilter(classes=classes, required_fields=required_fields)
return providers(metafunc, filters=[pf])
def all_providers(metafunc, **options):
""" Returns providers of all types """
return providers_by_class(metafunc, [BaseProvider], **options)
def auth_groups(metafunc, auth_mode):
"""Provides two test params based on the 'auth_modes' and 'group_roles' in cfme_data:
``group_name``:
expected group name in provided by the backend specified in ``auth_mode``
``group_data``:
list of nav destinations that should be visible as a member of ``group_name``
Args:
auth_mode: One of the auth_modes specified in ``cfme_data.get('auth_modes', {})``
"""
argnames = ['group_name', 'group_data']
argvalues = []
idlist = []
if auth_mode in cfme_data.get('auth_modes', {}):
# If auth_modes exists, group_roles is assumed to exist as well
for group in group_data:
argvalues.append([group, sorted(group_data[group])])
idlist.append(group)
return argnames, argvalues, idlist
def config_managers(metafunc):
"""Provides config managers
"""
argnames = ['config_manager_obj']
argvalues = []
idlist = []
data = cfme_data.get('configuration_managers', {})
for cfg_mgr_key in data:
argvalues.append([get_config_manager_from_config(cfg_mgr_key)])
idlist.append(cfg_mgr_key)
return argnames, argvalues, idlist
def pxe_servers(metafunc):
"""Provides pxe data based on the server_type
Args:
server_name: One of the server names to filter by, or 'all'.
"""
argnames = ['pxe_name', 'pxe_server_crud']
argvalues = []
idlist = []
data = cfme_data.get('pxe_servers', {})
for pxe_server in data:
argvalues.append([data[pxe_server]['name'],
get_pxe_server_from_config(pxe_server)])
idlist.append(pxe_server)
return argnames, argvalues, idlist
| dajohnso/cfme_tests | utils/testgen.py | Python | gpl-2.0 | 14,573 | ["VisIt"] | 2dbdffec8aff345422c5ef9f6f1ed528c2a272187dfdc409514a047ece72dab5 |
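A hedged, self-contained illustration of the argnames/argvalues/idlist pattern the module docstring above describes, using plain pytest and the provider yaml from that docstring (put this in a conftest.py to try it):

providers = {
    'prov_1': {'name': 'test', 'ip': '10.0.0.1', 'test_vm': 'abc1'},
    'prov_2': {'name': 'test2', 'ip': '10.0.0.2', 'test_vm': 'abc2'},
}

def pytest_generate_tests(metafunc):
    if {'provider_name', 'test_vm'} <= set(metafunc.fixturenames):
        argnames = ['provider_name', 'test_vm']
        idlist = sorted(providers)                      # ['prov_1', 'prov_2']
        argvalues = [[providers[k]['name'], providers[k]['test_vm']]
                     for k in idlist]
        metafunc.parametrize(argnames, argvalues, ids=idlist)

def test_provider_add(provider_name, test_vm):
    assert provider_name and test_vm   # runs once per id: [prov_1], [prov_2]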
import os
import sys
import ast
import imp
import marshal
import meta
import cPickle as pickle
import optparse
from hashlib import sha256
from visitor import ScopeVisitor
from backend import Scope, Symbol, Instance, Context, Unknown
NAME = 'strictpy'
__version__ = '1.0.0'
def pyc_source(pyc_contents):
code_section = pyc_contents[8:]
code = marshal.load(code_section)
return meta.dump_python_source(meta.decompile(code))
def get_module_source_path(import_name, current_filepath):
if import_name is None:
module_file = None
module_path = current_filepath
else:
source_dir = os.path.abspath(os.path.dirname(current_filepath))
# sys.path includes PYTHONPATH env var
python_paths = [source_dir] + sys.path[1:]
try:
module_file, module_path, _ = imp.find_module(
import_name, python_paths)
#print(import_name + ' => ' + module_path)
except ImportError:
raise RuntimeError('Could not find module for ' + import_name)
if module_file:
module_file.close()
if module_file is None and module_path == '':
# module does not live in a file
raise RuntimeError('Could not find module source for '
+ str(import_name))
elif module_file is None: # probably a package
if os.path.isdir(module_path):
for extension in ['py', 'pyc', 'pyo']:
filepath = os.path.join(module_path, '__init__.' + extension)
if os.path.exists(filepath):
return filepath, True
raise RuntimeError('Could not find __init__.py for '
+ str(import_name))
else:
raise RuntimeError('Unrecognized module type')
return module_path, False
def import_source(import_name, current_filepath):
module_path, is_package = get_module_source_path(
import_name, current_filepath)
if module_path.endswith('.py'):
with open(module_path) as module_file:
return module_file.read(), module_path, is_package
elif module_path.endswith(('.pyc', '.pyo')):
py_path = module_path[:-1] # look for ".py" file in same dir
if os.path.exists(py_path):
with open(py_path) as py_file:
return py_file.read(), module_path, is_package
else:
with open(module_path) as module_file:
return pyc_source(module_file.read()), module_path, is_package
else:
raise RuntimeError('Unrecognized extension: ' + module_path)
def import_module(name, current_filepath, imported, warn):
try:
source, filepath, is_package = import_source(name, current_filepath)
except RuntimeError as error:
warn('import-failed', name + ' ' + current_filepath + '\n' + str(error))
return Unknown(), current_filepath, False
cache_filename = sha256(filepath + '~' + source).hexdigest()
cache_filepath = os.path.join(os.sep, 'var', 'cache', NAME,
__version__, cache_filename)
if os.path.exists(cache_filepath):
with open(cache_filepath, 'rb') as cache_file:
return pickle.load(cache_file), filepath, is_package
elif filepath in imported:
#i = imported.index(filepath)
#paths = ' -> '.join(imported[i:] + [filepath])
#print('CIRCULAR: ' + paths)
return Instance('object', Scope()), filepath, is_package
else:
imported.append(filepath)
scope, _, _ = analyze(source, filepath, imported=imported)
module = Instance('object', scope)
with open(cache_filepath, 'wb') as cache_file:
pickle.dump(module, cache_file, pickle.HIGHEST_PROTOCOL)
return module, filepath, is_package
def import_chain(fully_qualified_name, asname, import_scope, current_filepath,
imported, warn):
scope = import_scope
filepath = current_filepath
is_package = True
names = fully_qualified_name.split('.') if fully_qualified_name else [None]
for name in names:
if scope is None:
warn('import-error', fully_qualified_name)
return Unknown()
if is_package:
import_type, filepath, is_package = import_module(
name, filepath, imported, warn)
if asname is None:
scope.add(Symbol(name, import_type))
scope = (import_type.attributes if isinstance(import_type, Instance)
else None)
else:
import_type = scope.get_type(name)
scope = None
if asname is not None:
import_scope.add(Symbol(asname, import_type))
return import_type
def get_path_for_level(filepath, level):
for _ in range(level):
filepath = os.path.dirname(filepath)
if level > 0:
filepath = os.path.join(filepath, '__init__.py')
return filepath
class ModuleVisitor(ScopeVisitor):
def visit_Module(self, node):
self.begin_scope()
self.generic_visit(node)
# don't end scope so that caller can see what is in the scope
def visit_Import(self, node):
scope = self._context.get_top_scope()
warn = lambda category, details: self._warnings.warn(
node, category, details)
for alias in node.names:
import_chain(alias.name, alias.asname, scope, self._filepath,
self._imported, warn)
def visit_ImportFrom(self, node):
filepath = get_path_for_level(self._filepath, node.level)
parts = node.module.split('.') if node.module else [None]
warn = lambda category, details: self._warnings.warn(
node, category, details)
for part in parts:
import_type, filepath, is_package = import_module(
part, filepath, self._imported, warn)
for alias in node.names:
symbol_name = alias.asname or alias.name
if is_package:
symbol_type, _, _ = import_module(alias.name, filepath,
self._imported, warn)
else:
if isinstance(import_type, Instance):
symbol_type = import_type.attributes.get_type(alias.name)
if symbol_type is None:
warn('name-not-found', alias.name)
continue
else:
symbol_type = Unknown()
self._context.add(Symbol(symbol_name, symbol_type))
def builtin_context():
filename = 'builtins.py'
context = Context()
this_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(this_dir, filename)) as builtins_file:
source = builtins_file.read()
analyze(source, filename, context)
return context
def analyze(source, filepath=None, context=None, imported=None):
imported = imported if imported is not None else []  # avoid a shared mutable default
tree = ast.parse(source, filepath)
visitor = ModuleVisitor(filepath, context or builtin_context(), imported)
visitor.visit(tree)
return visitor.report()
def analysis(source, filepath=None, context=None, show_types=False):
scope, warnings, _ = analyze(source, filepath, context)
warning_output = str(warnings)
if show_types:
scope_output = str(scope)
separator = '\n' if warning_output and scope_output else ''
return scope_output + separator + warning_output
else:
return warning_output
def main():
parser = optparse.OptionParser()
parser.add_option('-t', '--types', dest='show_types', default=False,
help='Show types of symbols defined in top scope')
options, args = parser.parse_args()
if len(args) == 0:
filepath = ''
source = sys.stdin.read()
else:
filepath = args[0]
with open(filepath) as source_file:
source = source_file.read()
#sys.stdout.write(analysis(source, filepath, Context()))
sys.stdout.write(analysis(source, filepath, show_types=options.show_types))
if __name__ == '__main__':
main()
| clark800/pystarch | main.py | Python | mit | 8,180 | ["VisIt"] | 6a3f6c44615bb3b1621eb81c99c69fce5ee91798753d525ca15c0b5e33c87bb9 |
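A hedged sketch of the cache-key scheme import_module uses above: the key is a sha256 over "<filepath>~<source>", so a module is only re-analysed when its path or contents change. The .encode() is added so the sketch runs on Python 3; the file itself is Python 2:

import os
from hashlib import sha256

NAME, VERSION = 'strictpy', '1.0.0'

def cache_path_for(filepath, source):
    key = sha256((filepath + '~' + source).encode()).hexdigest()
    return os.path.join(os.sep, 'var', 'cache', NAME, VERSION, key)

print(cache_path_for('/tmp/mod.py', 'x = 1\n'))
# -> /var/cache/strictpy/1.0.0/<64-hex-digit digest>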
"""Collection of :class:`~chainer.Function` implementations."""
from chainer.functions.activation import clipped_relu
from chainer.functions.activation import elu
from chainer.functions.activation import leaky_relu
from chainer.functions.activation import log_softmax
from chainer.functions.activation import lstm
from chainer.functions.activation import maxout
from chainer.functions.activation import prelu
from chainer.functions.activation import relu
from chainer.functions.activation import sigmoid
from chainer.functions.activation import slstm
from chainer.functions.activation import softmax
from chainer.functions.activation import softplus
from chainer.functions.activation import tanh
from chainer.functions.array import broadcast
from chainer.functions.array import concat
from chainer.functions.array import copy
from chainer.functions.array import expand_dims
from chainer.functions.array import reshape
from chainer.functions.array import select_item
from chainer.functions.array import split_axis
from chainer.functions.array import swapaxes
from chainer.functions.array import transpose
from chainer.functions.array import where
from chainer.functions.connection import bilinear
from chainer.functions.connection import convolution_2d
from chainer.functions.connection import deconvolution_2d
from chainer.functions.connection import embed_id
from chainer.functions.connection import linear
from chainer.functions.evaluation import accuracy
from chainer.functions.evaluation import binary_accuracy
from chainer.functions.loss import contrastive
from chainer.functions.loss import cross_covariance
from chainer.functions.loss import ctc
from chainer.functions.loss import hinge
from chainer.functions.loss import huber_loss
from chainer.functions.loss import mean_squared_error
from chainer.functions.loss import negative_sampling
from chainer.functions.loss import sigmoid_cross_entropy
from chainer.functions.loss import softmax_cross_entropy
from chainer.functions.loss import vae # NOQA
from chainer.functions.math import basic_math # NOQA
from chainer.functions.math import batch_l2_norm_squared
from chainer.functions.math import clip
from chainer.functions.math import det
from chainer.functions.math import exponential
from chainer.functions.math import identity
from chainer.functions.math import inv
from chainer.functions.math import matmul
from chainer.functions.math import minmax
from chainer.functions.math import sum
from chainer.functions.math import trigonometric
from chainer.functions.noise import dropout
from chainer.functions.noise import gaussian
from chainer.functions.normalization import batch_normalization
from chainer.functions.normalization import local_response_normalization
from chainer.functions.pooling import average_pooling_2d
from chainer.functions.pooling import max_pooling_2d
from chainer.functions.pooling import spatial_pyramid_pooling_2d
from chainer.functions.pooling import unpooling_2d
from chainer.links.activation import prelu as links_prelu
from chainer.links.connection import bilinear as links_bilinear
from chainer.links.connection import convolution_2d as links_convolution_2d
from chainer.links.connection import embed_id as links_embed_id
from chainer.links.connection import inception
from chainer.links.connection import inceptionbn
from chainer.links.connection import linear as links_linear
from chainer.links.connection import parameter
from chainer.links.loss import hierarchical_softmax
from chainer.links.loss import negative_sampling as links_negative_sampling
from chainer.links.normalization import batch_normalization \
as links_batch_normalization
ClippedReLU = clipped_relu.ClippedReLU
clipped_relu = clipped_relu.clipped_relu
ConnectionistTemporalClassification = ctc.ConnectionistTemporalClassification
connectionist_temporal_classification \
= ctc.connectionist_temporal_classification
ELU = elu.ELU
elu = elu.elu
LeakyReLU = leaky_relu.LeakyReLU
leaky_relu = leaky_relu.leaky_relu
LogSoftmax = log_softmax.LogSoftmax
log_softmax = log_softmax.log_softmax
LSTM = lstm.LSTM
lstm = lstm.lstm
maxout = maxout.maxout
prelu = prelu.prelu
ReLU = relu.ReLU
relu = relu.relu
Sigmoid = sigmoid.Sigmoid
sigmoid = sigmoid.sigmoid
SLSTM = slstm.SLSTM
slstm = slstm.slstm
Softmax = softmax.Softmax
softmax = softmax.softmax
Softplus = softplus.Softplus
softplus = softplus.softplus
Tanh = tanh.Tanh
tanh = tanh.tanh
Broadcast = broadcast.Broadcast
BroadcastTo = broadcast.BroadcastTo
broadcast_to = broadcast.broadcast_to
broadcast = broadcast.broadcast
Concat = concat.Concat
concat = concat.concat
Copy = copy.Copy
copy = copy.copy
ExpandDims = expand_dims.ExpandDims
expand_dims = expand_dims.expand_dims
Reshape = reshape.Reshape
reshape = reshape.reshape
SplitAxis = split_axis.SplitAxis
split_axis = split_axis.split_axis
SelectItem = select_item.SelectItem
select_item = select_item.select_item
Swapaxes = swapaxes.Swapaxes
swapaxes = swapaxes.swapaxes
Transpose = transpose.Transpose
transpose = transpose.transpose
Where = where.Where
where = where.where
bilinear = bilinear.bilinear
convolution_2d = convolution_2d.convolution_2d
deconvolution_2d = deconvolution_2d.deconvolution_2d
embed_id = embed_id.embed_id
linear = linear.linear
Accuracy = accuracy.Accuracy
accuracy = accuracy.accuracy
BinaryAccuracy = binary_accuracy.BinaryAccuracy
binary_accuracy = binary_accuracy.binary_accuracy
bernoulli_nll = vae.bernoulli_nll
BinaryHierarchicalSoftmax = hierarchical_softmax.BinaryHierarchicalSoftmax
Contrastive = contrastive.Contrastive
contrastive = contrastive.contrastive
CrossCovariance = cross_covariance.CrossCovariance
cross_covariance = cross_covariance.cross_covariance
gaussian_kl_divergence = vae.gaussian_kl_divergence
gaussian_nll = vae.gaussian_nll
Hinge = hinge.Hinge
hinge = hinge.hinge
MeanSquaredError = mean_squared_error.MeanSquaredError
mean_squared_error = mean_squared_error.mean_squared_error
negative_sampling = negative_sampling.negative_sampling
SigmoidCrossEntropy = sigmoid_cross_entropy.SigmoidCrossEntropy
sigmoid_cross_entropy = sigmoid_cross_entropy.sigmoid_cross_entropy
HuberLoss = huber_loss.HuberLoss
huber_loss = huber_loss.huber_loss
SoftmaxCrossEntropy = softmax_cross_entropy.SoftmaxCrossEntropy
softmax_cross_entropy = softmax_cross_entropy.softmax_cross_entropy
BatchDet = det.BatchDet
batch_det = det.batch_det
BatchInv = inv.BatchInv
batch_inv = inv.batch_inv
BatchL2NormSquared = batch_l2_norm_squared.BatchL2NormSquared
batch_l2_norm_squared = batch_l2_norm_squared.batch_l2_norm_squared
BatchMatMul = matmul.BatchMatMul
batch_matmul = matmul.batch_matmul
Clip = clip.Clip
clip = clip.clip
Cos = trigonometric.Cos
cos = trigonometric.cos
det = det.det
Exp = exponential.Exp
exp = exponential.exp
Identity = identity.Identity
identity = identity.identity
Inv = inv.Inv
inv = inv.inv
Log = exponential.Log
log = exponential.log
MatMul = matmul.MatMul
matmul = matmul.matmul
Max = minmax.Max
max = minmax.max
Min = minmax.Min
min = minmax.min
Sin = trigonometric.Sin
sin = trigonometric.sin
Sum = sum.Sum
sum = sum.sum
Dropout = dropout.Dropout
dropout = dropout.dropout
Gaussian = gaussian.Gaussian
gaussian = gaussian.gaussian
fixed_batch_normalization = batch_normalization.fixed_batch_normalization
batch_normalization = batch_normalization.batch_normalization
LocalResponseNormalization = \
local_response_normalization.LocalResponseNormalization
local_response_normalization = \
local_response_normalization.local_response_normalization
AveragePooling2D = average_pooling_2d.AveragePooling2D
average_pooling_2d = average_pooling_2d.average_pooling_2d
MaxPooling2D = max_pooling_2d.MaxPooling2D
max_pooling_2d = max_pooling_2d.max_pooling_2d
SpatialPyramidPooling2D = spatial_pyramid_pooling_2d.SpatialPyramidPooling2D
spatial_pyramid_pooling_2d = \
spatial_pyramid_pooling_2d.spatial_pyramid_pooling_2d
Unpooling2D = unpooling_2d.Unpooling2D
unpooling_2d = unpooling_2d.unpooling_2d
# Import for backward compatibility
PReLU = links_prelu.PReLU
Bilinear = links_bilinear.Bilinear
Convolution2D = links_convolution_2d.Convolution2D
EmbedID = links_embed_id.EmbedID
Inception = inception.Inception
InceptionBN = inceptionbn.InceptionBN
Linear = links_linear.Linear
Parameter = parameter.Parameter
NegativeSampling = links_negative_sampling.NegativeSampling
BatchNormalization = links_batch_normalization.BatchNormalization
| AlpacaDB/chainer | chainer/functions/__init__.py | Python | mit | 8,397 | ["Gaussian"] | a4655b7c81379cf488dc6941cf2a215a69f69e98959a6b1b3034158f19717d37 |
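The module above exists to flatten chainer's per-topic submodules into a single namespace, so callers can address both the Function classes and their lowercase wrappers through one import. A hedged usage sketch; name availability depends on the old chainer version this file ships with:

import numpy as np
import chainer.functions as F
from chainer import Variable

x = Variable(np.array([[-1.0, 2.0]], dtype=np.float32))
y = F.relu(x)   # lowercase wrapper aliased above
cls = F.ReLU    # the Function class alias sits next to it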
# -*- python -*-
# Package : omniidl
# util.py Created on: 1999/11/2
# Author : David Scott (djs)
#
# Copyright (C) 2002-2008 Apasphere Ltd
# Copyright (C) 1999 AT&T Laboratories Cambridge
#
# This file is part of omniidl.
#
# omniidl is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
# Description:
#
# General utility functions designed for the C++ backend
# $Id: util.py 5867 2009-05-06 16:16:18Z dgrisby $
# $Log$
# Revision 1.17.2.3 2008/12/03 10:53:58 dgrisby
# Tweaks leading to Python 3 support; other minor clean-ups.
#
# Revision 1.17.2.2 2003/10/23 11:25:55 dgrisby
# More valuetype support.
#
# Revision 1.17.2.1 2003/03/23 21:02:40 dgrisby
# Start of omniORB 4.1.x development branch.
#
# Revision 1.14.2.4 2002/09/03 09:32:57 dgrisby
# C++ back-end bails out if asked to compile more than one file.
#
# Revision 1.14.2.3 2001/06/08 17:12:14 dpg1
# Merge all the bug fixes from omni3_develop.
#
# Revision 1.14.2.2 2000/10/12 15:37:49 sll
# Updated from omni3_1_develop.
#
# Revision 1.15.2.3 2000/09/14 16:03:03 djs
# Remodularised C++ descriptor name generator
# Bug in listing all inherited interfaces if one is a forward
# repoID munging function now handles #pragma ID in bootstrap.idl
# Naming environments generating code now copes with new IDL AST types
# Modified type utility functions
# Minor tidying
#
# Revision 1.15.2.2 2000/08/21 11:34:37 djs
# Lots of omniidl/C++ backend changes
#
# Revision 1.15.2.1 2000/08/07 17:48:13 dpg1
# Merge from omni3_develop again.
#
# Revision 1.12.2.8 2000/06/28 13:59:04 dpg1
# Remove dependency on traceback module.
#
# Revision 1.12.2.7 2000/06/26 16:23:11 djs
# Added new backend arguments.
# Better error handling when encountering unsupported IDL (eg valuetypes)
# Refactoring of configuration state mechanism.
#
# Revision 1.12.2.6 2000/05/31 18:02:17 djs
# Better output indenting (and preprocessor directives now correctly output at
# the beginning of lines)
#
# Revision 1.12.2.5 2000/04/26 18:22:15 djs
# Rewrote type mapping code (now in types.py)
# Rewrote identifier handling code (now in id.py)
#
# Revision 1.12.2.4 2000/03/20 11:49:28 djs
# Added a "LazyStream" class to help reduce the amount of output buffering
# required
#
# Revision 1.12.2.3 2000/03/09 15:21:40 djs
# Better handling of internal compiler exceptions (eg attempts to use
# wide string types)
#
# Revision 1.12.2.2 2000/02/18 23:01:20 djs
# Updated example implementation code generating module
#
# Revision 1.12.2.1 2000/02/14 18:34:56 dpg1
# New omniidl merged in.
#
# Revision 1.12 2000/01/20 18:25:53 djs
# Got rid of some superfluous whitespace
#
# Revision 1.11 2000/01/17 16:59:53 djs
# Some whitespace stripping in StringStream
#
# Revision 1.10 2000/01/12 17:47:38 djs
# Reverted to simpler output stream design- will probably use dpg1's version
# in common with the python back end.
#
# Revision 1.9 2000/01/07 20:31:18 djs
# Regression tests in CVSROOT/testsuite now pass for
# * no backend arguments
# * tie templates
# * flattened tie templates
# * TypeCode and Any generation
#
# Revision 1.8 1999/11/29 19:27:00 djs
# Code tidied and moved around. Some redundant code eliminated.
#
# Revision 1.7 1999/11/26 18:51:44 djs
# Generates nicer output when doing blank substitutions
#
# Revision 1.6 1999/11/19 20:05:39 djs
# Removed superfluous function. Added zip.
#
# Revision 1.5 1999/11/15 19:10:55 djs
# Added module for utility functions specific to generating skeletons
# Union skeletons working
#
# Revision 1.4 1999/11/10 20:19:32 djs
# Option to emulate scope bug in old backend
# Array struct element fix
# Union sequence element fix
#
# Revision 1.3 1999/11/04 19:05:02 djs
# Finished moving code from tmp_omniidl. Regression tests ok.
#
# Revision 1.2 1999/11/03 17:35:07 djs
# Brought more of the old tmp_omniidl code into the new tree
#
# Revision 1.1 1999/11/03 11:09:50 djs
# General module renaming
#
"""General utility functions used by the C++ backend"""
from omniidl import idlutil, idltype
from omniidl_be.cxx import config
import sys, re, string
try:
import traceback
have_traceback = 1
except:
have_traceback = 0
## Fatal error handling function ##################################
##
def fatalError(explanation):
if config.state['Debug']:
# don't exit the program in debug mode...
print "omniidl: fatalError occurred, in debug mode."
for line in string.split(explanation, "\n"):
print ">> " + line
#print "Configuration state:"
#print "-------------------------"
#config.state.dump()
if have_traceback:
print "Stack:"
print "-------------------------"
traceback.print_stack()
print "Exception:"
print "-------------------------"
traceback.print_exc()
sys.exit(1)
lines = string.split(explanation, "\n")
lines = [ "Fatal error in C++ backend", "" ] + lines
for line in lines:
sys.stderr.write("omniidl: " + line + "\n")
sys.stderr.write("""\
For more information (mailing list archives, bug reports etc.) please visit
the webpage:
http://omniorb.sourceforge.net/
""")
sys.exit(1)
# Called whenever an unsupported IDL construct is found in the input
# (necessary because the front end supports all the new CORBA 2.3
# constructs whereas the ORB and correspondingly this backend does not)
def unsupportedIDL():
e = """\
Unsupported IDL construct encountered in input.
"""
fatalError(e)
## Set manipulation functions ######################################
##
def union(a, b):
result = a[:]
for x in b:
if x not in result:
result.append(x)
return result
def minus(a, b):
result = []
for x in a:
if x not in b:
result.append(x)
return result
def intersect(a, b):
result = []
for x in a:
if x in b:
result.append(x)
return result
def setify(set):
new_set = []
for x in set:
if x not in new_set:
new_set.append(x)
return new_set
## List manipulation functions #####################################
##
def zip(a, b):
if a == [] or b == []: return []
return [(a[0], b[0])] + zip(a[1:], b[1:])
def fold(list, base, fn):
if len(list) == 1:
return fn(list[0], base)
first = fn(list[0], list[1])
rest = [first] + list[2:]
return fold(rest, base, fn)
## Assorted other functions ########################################
##
class Stack:
def __init__(self):
self.__list = []
def push(self, thing):
self.__list.append(thing)
def pop(self):
assert self.__list
thing = self.__list[-1]
self.__list = self.__list[0:-1]
return thing
| ogata-lab/rtmsdk-mac | x86_64/lib/python2.7/site-packages/omniidl_be/cxx/util.py | Python | lgpl-2.1 | 7,529 | ["VisIt"] | 5b73819eaa9e55762dd7ab80f04388e00fd0bdaf6ea9d4f3e92aa479dde367a2 |
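A hedged demo (not in the file): the set helpers above are order-preserving, list-based versions of set operations, which matters when generated output must come out in a stable order. The import path follows the file location above:

from omniidl_be.cxx.util import union, minus, intersect, setify

assert union([1, 2], [2, 3]) == [1, 2, 3]      # append b's new items in order
assert minus([1, 2, 3], [2]) == [1, 3]         # a without b's items
assert intersect([1, 2], [2, 3]) == [2]        # items common to both
assert setify([1, 1, 2]) == [1, 2]             # de-duplicate, keep first seen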
#!/usr/bin/python3
# Copyright 2015 Francisco Pina Martins <f.pinamartins@gmail.com>
# This file is part of BioGenepop.
# BioGenepop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# BioGenepop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with BioGenepop. If not, see <http://www.gnu.org/licenses/>.
from Bio.PopGen.GenePop.EasyController import EasyController
# WARNING! Requires a patched BioPython to work!
# https://github.com/biopython/biopython/issues/582
def get_exp_obs_het(filehandle, outfile_name):
"""Doc here"""
pop_names, loci_names = filehandle.get_basic_info()
outfile = open(outfile_name, 'w')
double_loci = [loci_names[i//3] for i in range(len(loci_names)*3)]
outfile.write("\t" + "\t".join(double_loci) + "\n")
for pop in range(len(pop_names)):
hetros = pop_names[pop] + "\t"
loci_map = filehandle.test_hw_pop(pop, "excess")
for locus in loci_names:
exp_homo, obs_homo, exp_hetero, obs_hetero = filehandle.get_heterozygosity_info(pop,locus)
if loci_map[locus] is not None:
hetros += str(loci_map[locus][0]) + "\t" + str(exp_hetero) + "\t" + str(obs_hetero) + "\t"
else:
hetros += "-\t" + str(exp_hetero) + "\t" + str(obs_hetero) + "\t"
hetros = hetros.rstrip() + "\n"
outfile.write(hetros)
outfile.close()
def filehandler(infile):
"""Doc here"""
filehandle = EasyController(infile)
return filehandle
if __name__ == "__main__":
# Usage: python3 BioGenepop.py infile outfile
from sys import argv
filehandle = filehandler(argv[1])
get_exp_obs_het(filehandle, argv[2])
| CoBiG2/RAD_Tools | BioGenepop.py | Python | gpl-3.0 | 2,097 | ["Biopython"] | 8345e213f6a7176a877425e82adec712db16399e71ecd0bd3e52cf416deb8685 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#al_finder.py
#03/2014
__author__ = 'Igor Rodrigues da Costa'
__contact__ = 'igor.bioinfo@gmail.com'
''' This program extracts ALs from the genomes listed in a sum file,
aligns them and writes a nexus file.'''
import os
import argparse
import shlex
from hashlib import sha1
from random import sample, choice
from subprocess import Popen
from Bio import SeqIO
from Bio import AlignIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio.Align import MultipleSeqAlignment
from Bio.AlignIO import _FormatToWriter
from Bio.AlignIO.NexusIO import NexusWriter
from Bio.Nexus import Nexus
class NexusWriterInterleaved(NexusWriter):
#Set interleave to always true.
def write_alignment(self, alignment):
#Creates an empty Nexus object, adds the sequences,
#and then gets Nexus to prepare the output.
if len(alignment) == 0:
raise ValueError("Must have at least one sequence")
columns = alignment.get_alignment_length()
if columns == 0:
raise ValueError("Non-empty sequences are required")
minimal_record = "#NEXUS\nbegin data; dimensions ntax=0 nchar=0; " \
+ "format datatype=%s; end;" \
% self._classify_alphabet_for_nexus(alignment._alphabet)
n = Nexus.Nexus(minimal_record)
n.alphabet = alignment._alphabet
for record in alignment:
n.add_sequence(record.id, str(record.seq))
n.write_nexus_data(self.handle, interleave=True)
_FormatToWriter['nexus'] = NexusWriterInterleaved
def argument_parser(hlp = False):
'''al_align.py'''
default_sum = os.getcwd() + '/al_blast.sum'
parser = argparse.ArgumentParser(description = 'al_align.py',\
argument_default = None, fromfile_prefix_chars = '@')
parser.add_argument('-s', '--sum', nargs = '?', type = str, required = True,\
dest = 'sum', help = 'Path to sum file.')
parser.add_argument('-o', '--outpath', nargs = '?', type = str, default = os.getcwd(),\
dest = 'outpath', help = 'Path where the aligned results will be saved. (default: %(default)s)')
parser.add_argument('-l', '--log', nargs = '?', type = str, default = 'al_align.log',\
dest = 'log', help = 'Log file. (default: %(default)s)')
parser.add_argument('-f', '--filter', nargs = '*', type = str,\
dest = 'filter', help = 'Folder to look for duplicated fasta to remove. (default: %(default)s)')
parser.add_argument('-g', '--genomes', nargs = '*', type = str,\
dest = 'genomes', help = 'Path to all genomes used.')
parser.add_argument('-c', '--chromossomes', nargs = '*', type = str,\
dest = 'excluded', help = 'Chromosomes to be excluded.')
parser.add_argument('-a', '--min_align', nargs = '?', type = int, default = 500,\
dest = 'align_size', help = 'Minimum final alignment length. (default: %(default)s)')
parser.add_argument('-d', '--distance', nargs = '?', type = int, default = 200000,\
dest = 'idist', help = 'Minimum distance between ALs.(default: %(default)s)')
parser.add_argument('--minsize', nargs = '?', type = int, default = 500,\
dest = 'min_size', help = 'Minimum sequence size.(default: %(default)s)')
parser.add_argument('--distance_file', nargs = '?', type = str, default = 'al_align.dist',\
dest = 'dist_file', help = 'File to save all distances.')
parser.add_argument('--parts', nargs = '?', type = int, default = 0,\
dest = 'parts', help = 'Number of parts the locus was spliced.(default: %(default)s)')
parser.add_argument('-p', '--pick', nargs = '?', type = int,\
dest = 'pick', help = 'Pick only N ALs.')
parser.add_argument('--remove_gaps', action = 'store_true', dest = 'nogaps', help = 'Remove gaps from the final alignment.')
    parser.add_argument('--chromo_sep', action = 'store_true', dest = 'chromo_sep', help = 'Separate ALs by chromosome.')
parser.add_argument('-v', '--verbose', action = 'store_true', dest = 'verbose', help = 'Verbose switch.')
if hlp:
args = parser.parse_args(['-h'])
else:
args = parser.parse_args().__dict__
return args
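# Example invocation (hypothetical paths), using the options defined above:
#
#     python al_align.py -s al_blast.sum -g genomes/hg.fa genomes/pt.fa \
#         -o out/ --parts 2 --remove_gaps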
def main(args):
#args processing:
if args['verbose']:
a = open(args['log'], 'w')
a.close()
def vprint(*a):
# Print only when verbose
with open(args['log'], 'a') as log:
log.write(' '.join(a))
log.write('\n')
else:
def vprint(*a):
return None
for k in args:
vprint(k, ' ', str(args[k]))
if not args['genomes']:
print 'No genomes supplied. Check usage: al_align.py --help'
return
vprint('#######################')
if args['outpath'][-1] != '/':
args['outpath'] += '/'
#Finished analysing arguments.
nexus_files, sizes = [], []
args['min_seqs'] = len(args['genomes'])
    filtered_als = filter_al(args, vprint) # Remove ALs too close to each other and filter excluded chromosomes.
seqs = read_sum(args['genomes'], args['sum'], filtered_als, vprint, args['parts'], chromo_sep=args['chromo_sep']) #seqs[AL1] = [Homo:Seq_H, Gorilla:Seq_G, Pongo:Seq_P, Pan:Seq_C]
separated_files = write_seqs(args['outpath'], seqs, args['min_seqs'], args['min_size'], vprint, chromo_sep=args['chromo_sep'])
if args['filter']:
for f in args['filter']:
separated_files = check_for_duplicates(separated_files, f, vprint)
aligned_files = run_clustalw(args['outpath'], separated_files)
vprint(str(len(separated_files)), ' fasta files.')
folder = '/'.join(args['outpath'].split('/')[:-1]) + '/'
aligned_files = [folder + filename for filename in os.listdir(folder) if '.aln' in filename]
vprint(str(len(aligned_files)), ' aligned files.')
if args['chromo_sep']:
join_nexus_by_chromo(args['outpath'], aligned_files, args['align_size'], args['nogaps'], vprint)
else:
sizes, nexus_files = make_nexus(args['outpath'], aligned_files, args['align_size'], args['pick'], args['nogaps'], vprint)
if args['pick']:
assert len(nexus_files) <= args['pick']
join_nexus(args['outpath'], sizes, nexus_files)
join_fasta(args['outpath'], nexus_files)
make_phylip(args['outpath'], nexus_files)
with open(args['outpath'] + 'sample.log', 'w') as sample_file:
for n in nexus_files:
sample_file.write(n + '\n')
# if not sizes:
# sizes = get_sizes(nexus_files)
# if not nexus_files:
# nexus_files = [f for f in os.listdir(args['outpath']) if f.endswith('.nexus') and 'all' not in f]
def read_sum(genome_files, sumf, filtered_als, vprint, parts, chromo_sep=False):
#will open all genomes on memory!!!
#todo: change to SeqIO.index to improve memory usage OR change BLAST outfile to get the alignments.
#check pyfaidx
#al1 db1 sbj start end
#al1 db2 sbj start end
#al1 db3 sbj start end
#al1 db4 sbj start end
#al2 db1 sbj start end
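    # A concrete (hypothetical) line; the real fields are tab-separated:
    # AL1    hg.fa    chr1    150000    151200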
seqs = {} #seqs[AL1][Gorilla] = Seq_Gorilla
genomes = {} #genomes = {genome:{chr:SEQ}}
for g in genome_files:
genome_name = g.split('/')[-1]
genomes[genome_name] = {}
vprint('opening genome: ' + genome_name)
with open(g, 'r') as genome_file:
for seq_rec in SeqIO.parse(genome_file, 'fasta'):
chromo = seq_rec.description.split()[0]
genomes[genome_name][chromo] = seq_rec.seq
with open(sumf, 'r') as sumf:
for l in sumf:
a = l.split('\t')
al = a[0]
if a[1] in genomes:
genome = a[1] #UGLY! Only works if file names of db and genome are the same...
else:
try:
genome = genome_files[int(a[1].split('.')[0])]
genome = genome.split('/')[-1]
except:
                    raise ValueError('FORMATDB database must have the same name as the genome fasta')
chromo = a[2]
start = int(a[3])
end = int(a[4])
if al in filtered_als:
if start < end:
seq = genomes[genome][chromo][start:end]
else:
seq = genomes[genome][chromo][end:start]
seq = seq.reverse_complement()
if chromo_sep:
if al in seqs:
seqs[al][genome] = (seq, chromo)
else:
seqs[al] = {genome:(seq, chromo)}
else:
if al in seqs:
seqs[al][genome] = seq
else:
seqs[al] = {genome:seq}
if parts:
joined_seqs = {}
for al in seqs.keys():
al_id = al.split('.')[0]
if al_id not in joined_seqs:
try:
if chromo_sep:
seq = seqs[al_id+'.1']
for n in range(1, parts):
for genome in seq.keys():
seq[genome] = (seq[genome][0] + seqs[al_id+'.'+str(n+1)][genome][0], seq[genome][1])
joined_seqs[al_id] = seq
else:
seq = seqs[al_id+'.1']
for n in range(1, parts):
for genome in seq.keys():
seq[genome] += seqs[al_id+'.'+str(n+1)][genome]
joined_seqs[al_id] = seq
except KeyError:
continue #some part was not found
seqs = joined_seqs
return seqs
def filter_al(args, vprint):
''' Filter ALs based on the distance between them.'''
al_dict = {}
filtered_als = []
dist = args['idist']
last_id = ''
all_als = 0
removed_chromo = 0
with open(args['sum'], 'r') as sumf:
for l in sumf:
line = l.split('\t')
if line[0] != last_id: #first line in sum file must be from reference genome!
al_id = line[0]
al_start = int(line[3])
al_end = int(line[4])
al_chromo = line[2]
last_id = al_id
if al_chromo in al_dict:
al_dict[al_chromo].append((al_id, al_start, al_end, al_chromo))
else:
al_dict[al_chromo] = [(al_id, al_start, al_end, al_chromo)]
try:
f = open(args['dist_file'], 'w')
f.close()
except:
vprint('Could not open distance file: Check your permissions')
for chromo in sorted(al_dict.keys()):
al_list = sort_al(al_dict[chromo])
all_als += len(al_list)
if args['excluded'] and (chromo in args['excluded']):
removed_chromo += len(al_list)
continue
end = -dist - 1 #don't exclude first al
last_chromo = ''
with open(args['dist_file'], 'a') as dist_file:
for al in al_list: #al = (id, start, end, chromo)
if end + dist < al[1] or (args['parts'] and ('.1' not in al[0])):
if last_chromo == al[3]:
dist_file.write('{0}\t{1}\n'.format(al[0], al[1] - end))
else:
dist_file.write('Chromo: ' + al[3] + '\n')
dist_file.write('{0}\t{1}\n'.format(al[0], al[1] - end))
last_chromo = al[3]
filtered_als.append(al[0])
end = al[2]
removed_als = all_als - len(filtered_als)#[al[0] for al in all_als if al[0] not in filtered_als]
removed_distance = removed_als - removed_chromo
vprint(str(removed_distance) + '/' + str(all_als) + ' ALs removed due to inter-distance filter.')
    vprint(str(removed_chromo) + '/' + str(all_als) + ' ALs removed due to chromosome filter.')
return filtered_als
def sort_al(al_list):
    '''Sort ALs by their start position.'''
    return sorted(al_list, key=lambda al: al[1])
def write_seqs(outpath, seq_dict, minseqs, minsize, vprint, chromo_sep=False):
files = []
for al in seq_dict:
seqs = seq_dict[al]
new_file = outpath + al + '.fasta'
records = []
        keep = True  # renamed from 'filter' to avoid shadowing the builtin
        if len(seqs) < minseqs:
            vprint(al, str(len(seqs)), ' too few sequences')
            keep = False
        for sp in sorted(seqs):
            if len(seqs[sp]) < minsize:
                vprint(al, str(len(seqs[sp])), ' sequence size')
                keep = False
                break
        if keep:
for n, sp in enumerate(sorted(seqs)):
name = sp.split('.')[0].split('_')[0]
records.append(SeqRecord(seqs[sp], id = name, description = al)) #ID is not n if there is someone missing!
with open(new_file, 'w') as outfile:
SeqIO.write(records, outfile, 'fasta')
files.append(new_file.split('/')[-1])
return files
def chunk_reader(fobj, chunk_size=1024):
"""Generator that reads a file in chunks of bytes"""
while True:
chunk = fobj.read(chunk_size)
if not chunk:
return
yield chunk
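# Typical use (this is how check_for_duplicates below consumes it): hash a
# file incrementally without loading it whole into memory.
#
#     hashobj = sha1()
#     for chunk in chunk_reader(open('some.fasta', 'rb')):  # hypothetical file
#         hashobj.update(chunk)
#     digest = hashobj.digest()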
def check_for_duplicates(files, path, vprint, hash=sha1):
hashes = {}
original_len = len(files)
for f in files:
hashobj = hash()
for chunk in chunk_reader(open(f, 'rb')):
hashobj.update(chunk)
file_id = (hashobj.digest(), os.path.getsize(f))
hashes[file_id] = f
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
full_path = os.path.join(dirpath, filename)
hashobj = hash()
for chunk in chunk_reader(open(full_path, 'rb')):
hashobj.update(chunk)
file_id = (hashobj.digest(), os.path.getsize(full_path))
duplicate = hashes.get(file_id, None)
if duplicate:
files.remove(duplicate)
vprint("Duplicate found: %s and %s" % (full_path, duplicate))
return files
def run_clustalw(outpath, fasta_files):
aligned_files = []
clustalw_log = outpath + 'clustalw_log.txt'
clean = open(clustalw_log, 'w')
clean.close()
for fp in fasta_files:
try:
command = 'clustalw2 -INFILE=' + outpath+fp +\
' -ALIGN -OUTPUT=FASTA -OUTFILE=' + outpath+fp.split('/')[-1].replace('.fasta', '.aln')
with open(clustalw_log, 'a') as log:
log.write(fp + ' ' + command)
a = Popen(shlex.split(command), stdout=log, stderr=log)
a.wait()
aligned_files.append(fp.split('/')[-1].replace('.fasta', '.aln'))
except OSError:
try:
command = 'clustalw -INFILE=' + outpath+fp +\
' -ALIGN -OUTPUT=FASTA -OUTFILE=' + outpath+fp.split('/')[-1].replace('.fasta', '.aln')
with open(clustalw_log, 'a') as log:
log.write(fp + ' ' + command)
a = Popen(shlex.split(command), stdout=log, stderr=log)
a.wait()
aligned_files.append(fp.split('/')[-1].replace('.fasta', '.aln'))
except OSError:
print 'Clustalw not found'
raise
return aligned_files
def make_nexus(path, aligned_files, min_align_size, pick, nogaps, vprint):
alns = [al.split('/')[-1] for al in aligned_files]
size_dict = {}
old_alns = []
removed_alns = []
nexus_files = []
if pick:
if pick > len(alns):
pick = len(alns)
old_alns = alns
alns = sample(alns, pick)
map(old_alns.remove, alns)
else:
pick = len(alns)
assert len(old_alns) == 0
for i in range(pick):
infile = alns[i]
splited_infile = infile.split('.')
if len(splited_infile) > 2:
chromo = splited_infile[1]
outfile = infile.replace('.aln', '.nexus')
vprint('making nexus:', path + infile, path + outfile)
size, gaps = fastatonexus(path+infile, path+outfile, keep_gaps = not nogaps, vprint = vprint)
if size - gaps < min_align_size:
vprint(infile, 'removed due to small alignment size.')
removed_alns.append(infile)
if len(old_alns) > 0:
random_element = choice(old_alns)
vprint('Added ', random_element, 'to replace it.')
old_alns.remove(random_element)
alns.append(random_element)
else:
size_dict[infile] = size
nexus_files.append(outfile)
map(alns.remove, removed_alns)
vprint(str(len(alns)), 'filtered files.')
sizes = []
for f in sorted(alns):
sizes.append(size_dict[f])
return sizes, nexus_files
def join_fasta(path, aligned_files, outfile='all.fasta'):
aligned_files.sort()
with open(path + outfile, 'w') as fasta_out:
for f in aligned_files:
f = f.replace('.nexus', '.aln')
f_id = f.split('.')[0]
with open(path + f, 'r') as fasta:
for l in fasta:
if l.startswith('>'):
l = '>' + f_id + '_' + l[1:]
fasta_out.write(l)
def make_phylip(path, nexus_files, concat_outfile='all.phylip'):
nexus_files.sort()
with open(path + concat_outfile, 'w') as out:
for f in nexus_files:
f = f.replace('.nexus', '.aln')
outfile = '.'.join(f.split('.')[:-1]) + '.phylip'
seq = fasta2phylip(path+f, path+outfile)
out.write(seq)
def fasta2phylip(infile, outfile):
al_id = infile.split('.')[0].split('/')[-1]
seqs = []
with open(infile, 'r') as fasta:
seq = ''
sp_ids = []
for l in fasta:
if l.startswith('>'):
sp_ids.append('{}-AL'.format(l.strip()[1:11]))
#sp_ids.append('{}-AL{}'.format(l.strip()[1:11], al_id))
if seq:
seqs.append(seq)
seq = ''
else:
seq += l.strip()
seqs.append(seq)
lines = []
with open(outfile, 'w') as out:
first_line ='\n {} {}\n\n'.format(len(sp_ids), len(seqs[0]))
out.write(first_line)
lines.append(first_line)
for n, sp in enumerate(zip(sp_ids, seqs)):
line = sp[0]+'^'+str(n)+'\n'+ sp[1] + '\n'
out.write(line)
lines.append(line)
return ''.join(lines)
def join_nexus_by_chromo(path, aligned_files, min_align_size, nogaps, vprint):
chromo_dict = {}
for af in aligned_files:
chromo = af.split('.')[1]
if chromo in chromo_dict:
chromo_dict[chromo].append(af)
else:
chromo_dict[chromo] = [af]
for chromo in chromo_dict:
sizes, nexus_files = make_nexus(path, chromo_dict[chromo], min_align_size, None, nogaps, vprint)
join_nexus(path, sizes, nexus_files, 'all.%s.nexus'%chromo)
def join_nexus(path, sizes, nexus_files, outfile='all.nexus'):
soma_old = 0
soma = 0
append = '''
;
end;
begin mrbayes;
set autoclose=yes nowarn=yes;
outgroup 4;
[BEST uses taxset to define which allele belongs to which species.]
[for example, taxset H=1; says that the first sequence belongs to the species H]
taxset Gorilla = 1;
taxset Homo = 2;
taxset Pan = 3;
taxset Pongo = 4;
'''
for n, s in enumerate(sizes):
soma = soma_old + s
append += 'CHARSET gene' + str(n + 1) + ' = ' + str(soma_old + 1) + ' - ' + str(soma) + ';\n'
soma_old = soma
start = 'begin data;\n dimensions ntax=4 nchar=' + str(soma) + ';\n format datatype=DNA interleave missing=? gap=-;\n matrix\n'''
n = len(nexus_files)
append += 'partition Genes = ' + str(n) + ': '
for i in range(n):
append += 'gene' + str(i+1) + ', '
append = append[:-2] + ';\n'
append += '''set partition=Genes;
prset thetapr=invgamma(3,0.003) GeneMuPr=uniform(0.5,1.5) BEST=1;
unlink topology=(all) brlens=(all) genemu=(all);
mcmc ngen=5000000 nrun = 2 burnin=1000000 nchain = 2 samplefreq=100 ;
quit;
end;'''
seqs = ''
nexus_files.sort()
with open(path + outfile, 'w') as nex_out:
nex_out.write(start)
for f in nexus_files:
with open(path + f, 'r') as nex_in:
seqs = ''
for l in nex_in:
if l[:-1] == 'matrix':
l = nex_in.next()
while l[:-1] != ';':
seqs += l
l = nex_in.next()
nex_out.write(seqs)
nex_out.write(append)
def get_sizes(nexus_files):
sizes = []
for f in sorted(nexus_files):
i = AlignIO.read(f, 'nexus')
sizes.append(len(i[0]))
return sizes
def fastatonexus(infile, outfile, format_in = 'fasta', format_out = 'nexus', protein = False, keep_gaps = False, keep_n = False, vprint = lambda *a: None):
align = ''
gaps = 0
with open(infile, 'r') as handle:
i = AlignIO.read(handle, format_in)
columns = len(i[0])
for col in range(columns):
if keep_n or 'N' not in str(i[:,col:col+1]):
if '-' in str(i[:,col:col+1]):
gaps += 1
if keep_gaps:
if align:
align += i[:,col:col+1]
else:
align = i[:,col:col+1]
else:
if align:
align += i[:,col:col+1]
else:
align = i[:,col:col+1]
if not align:
        vprint('No sequences found in', infile)
        raise ValueError('No alignment columns left in ' + infile)
if keep_gaps:
size = align.get_alignment_length()
else:
size = align.get_alignment_length() + gaps
vprint(infile, 'alignment size', str(size))
align.sort()
align._alphabet = IUPAC.unambiguous_dna
with open(outfile, 'wb') as out:
try:
AlignIO.write(align, out, format_out)
except:
vprint('Error while saving nexus', infile)
raise
return size, gaps
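# Example (hypothetical file names): convert one aligned fasta to nexus,
# dropping gap columns, and recover the effective alignment size:
#
#     size, gaps = fastatonexus('AL1.aln', 'AL1.nexus', keep_gaps=False)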
if __name__ == '__main__':
args = argument_parser()
main(args)
| igorrcosta/alfie | al_align.py | Python | gpl-2.0 | 22,717 | ["BLAST"] | 630129b17d35144a280a72b651e87ae4010a375b2cdd342a99454db33107901f |
# -*- coding: utf-8 -*-
## all SI units
########################################################################################
## Plot the membrane potential for a leaky integrate and fire neuron with current injection
## Author: Aditya Gilra
## Creation Date: 2012-06-08
## Modification Date: 2012-06-08
########################################################################################
import os
os.environ['NUMPTHREADS'] = '1'
import sys
sys.path.append('../../../python')
## simulation parameters
SIMDT = 5e-5 # seconds
PLOTDT = 5e-5 # seconds
RUNTIME = 2.0 # seconds
injectI = 1e-8  # 2.5e-12 # Amperes
## moose imports
import moose
from moose.neuroml import *
from moose.utils import * # has setupTable(), resetSim() etc
import math
## import numpy and matplotlib in matlab style commands
from pylab import *
def create_LIF():
neuromlR = NeuroML()
neuromlR.readNeuroMLFromFile('cells_channels/LIF.morph.xml')
libcell = moose.Neuron('/library/LIF')
LIFCellid = moose.copy(libcell,moose.Neutral('/cells'),'IF1')
LIFCell = moose.Neuron(LIFCellid)
return LIFCell
def run_LIF():
## reset and run the simulation
print "Reinit MOOSE."
## from moose_utils.py sets clocks and resets
resetSim(['/cells[0]'], SIMDT, PLOTDT, simmethod='ee')
print "Running now..."
moose.start(RUNTIME)
if __name__ == '__main__':
IF1 = create_LIF()
printCellTree(IF1)
IF1Soma = moose.element(IF1.path+'/soma_0') # moose.LIF instance
IF1Soma.inject = injectI
IF1vmTable = setupTable("vmTableIF1",IF1Soma,'Vm')
table_path = moose.Neutral(IF1Soma.path+'/data').path
IF1spikesTable = moose.Table(table_path+'/spikesTable')
moose.connect(IF1Soma,'spikeOut',IF1spikesTable,'input') ## spikeGen gives spiketimes
run_LIF()
print "Spiketimes :",IF1spikesTable.vector
## plot the membrane potential of the neuron
timevec = arange(0.0,RUNTIME+PLOTDT/2.0,PLOTDT)
## Something is crazy! Why twice the number of table entries compared to time!!??
figure(facecolor='w')
print IF1vmTable
plot(timevec, IF1vmTable.vector)
show()
| h-mayorquin/camp_india_2016 | tutorials/chemical switches/moose/neuroml/LIF/LIFxml_firing.py | Python | mit | 2,100 | ["MOOSE", "NEURON"] | c75e7e452312767d7613e76db0918c0eeb73da525b0a7c2f6ceba32984fd3954 |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__doc__ = """
Generic Taskmaster module for the SCons build engine.
This module contains the primary interface(s) between a wrapping user
interface and the SCons build engine. There are two key classes here:
Taskmaster
This is the main engine for walking the dependency graph and
calling things to decide what does or doesn't need to be built.
Task
This is the base class for allowing a wrapping interface to
decide what does or doesn't actually need to be done. The
intention is for a wrapping interface to subclass this as
appropriate for different types of behavior it may need.
The canonical example is the SCons native Python interface,
which has Task subclasses that handle its specific behavior,
like printing "`foo' is up to date" when a top-level target
doesn't need to be built, and handling the -c option by removing
targets as its "build" action. There is also a separate subclass
for suppressing this output when the -q option is used.
The Taskmaster instantiates a Task object for each (set of)
target(s) that it decides need to be evaluated and/or built.
"""
__revision__ = "src/engine/SCons/Taskmaster.py 5134 2010/08/16 23:02:40 bdeegan"
from itertools import chain
import operator
import sys
import traceback
import SCons.Errors
import SCons.Node
import SCons.Warnings
StateString = SCons.Node.StateString
NODE_NO_STATE = SCons.Node.no_state
NODE_PENDING = SCons.Node.pending
NODE_EXECUTING = SCons.Node.executing
NODE_UP_TO_DATE = SCons.Node.up_to_date
NODE_EXECUTED = SCons.Node.executed
NODE_FAILED = SCons.Node.failed
# A subsystem for recording stats about how different Nodes are handled by
# the main Taskmaster loop. There's no external control here (no need for
# a --debug= option); enable it by changing the value of CollectStats.
CollectStats = None
class Stats(object):
"""
A simple class for holding statistics about the disposition of a
Node by the Taskmaster. If we're collecting statistics, each Node
processed by the Taskmaster gets one of these attached, in which case
the Taskmaster records its decision each time it processes the Node.
(Ideally, that's just once per Node.)
"""
def __init__(self):
"""
Instantiates a Taskmaster.Stats object, initializing all
appropriate counters to zero.
"""
self.considered = 0
self.already_handled = 0
self.problem = 0
self.child_failed = 0
self.not_built = 0
self.side_effects = 0
self.build = 0
StatsNodes = []
fmt = "%(considered)3d "\
"%(already_handled)3d " \
"%(problem)3d " \
"%(child_failed)3d " \
"%(not_built)3d " \
"%(side_effects)3d " \
"%(build)3d "
def dump_stats():
for n in sorted(StatsNodes, key=lambda a: str(a)):
print (fmt % n.stats.__dict__) + str(n)
class Task(object):
"""
Default SCons build engine task.
This controls the interaction of the actual building of node
and the rest of the engine.
This is expected to handle all of the normally-customizable
aspects of controlling a build, so any given application
*should* be able to do what it wants by sub-classing this
class and overriding methods as appropriate. If an application
    needs to customize something by sub-classing Taskmaster (or
some other build engine class), we should first try to migrate
that functionality into this class.
Note that it's generally a good idea for sub-classes to call
these methods explicitly to update state, etc., rather than
roll their own interaction with Taskmaster from scratch.
"""
def __init__(self, tm, targets, top, node):
self.tm = tm
self.targets = targets
self.top = top
self.node = node
self.exc_clear()
def trace_message(self, method, node, description='node'):
fmt = '%-20s %s %s\n'
return fmt % (method + ':', description, self.tm.trace_node(node))
def display(self, message):
"""
Hook to allow the calling interface to display a message.
This hook gets called as part of preparing a task for execution
(that is, a Node to be built). As part of figuring out what Node
        should be built next, the actual target list may be altered,
along with a message describing the alteration. The calling
interface can subclass Task and provide a concrete implementation
of this method to see those messages.
"""
pass
def prepare(self):
"""
Called just before the task is executed.
This is mainly intended to give the target Nodes a chance to
unlink underlying files and make all necessary directories before
the Action is actually called to build the targets.
"""
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.prepare()', self.node))
# Now that it's the appropriate time, give the TaskMaster a
# chance to raise any exceptions it encountered while preparing
# this task.
self.exception_raise()
if self.tm.message:
self.display(self.tm.message)
self.tm.message = None
# Let the targets take care of any necessary preparations.
# This includes verifying that all of the necessary sources
# and dependencies exist, removing the target file(s), etc.
#
# As of April 2008, the get_executor().prepare() method makes
# sure that all of the aggregate sources necessary to build this
# Task's target(s) exist in one up-front check. The individual
# target t.prepare() methods check that each target's explicit
        # or implicit dependencies exist, and also initialize the
# .sconsign info.
executor = self.targets[0].get_executor()
executor.prepare()
for t in executor.get_action_targets():
t.prepare()
for s in t.side_effects:
s.prepare()
def get_target(self):
"""Fetch the target being built or updated by this task.
"""
return self.node
def needs_execute(self):
# TODO(deprecate): "return True" is the old default behavior;
# change it to NotImplementedError (after running through the
# Deprecation Cycle) so the desired behavior is explicitly
# determined by which concrete subclass is used.
#raise NotImplementedError
msg = ('Taskmaster.Task is an abstract base class; instead of\n'
'\tusing it directly, '
'derive from it and override the abstract methods.')
SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg)
return True
def execute(self):
"""
Called to execute the task.
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
prepare(), executed() or failed().
"""
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.execute()', self.node))
try:
everything_was_cached = 1
for t in self.targets:
if t.retrieve_from_cache():
# Call the .built() method without calling the
# .push_to_cache() method, since we just got the
# target from the cache and don't need to push
# it back there.
t.set_state(NODE_EXECUTED)
t.built()
else:
everything_was_cached = 0
break
if not everything_was_cached:
self.targets[0].build()
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
except SCons.Errors.UserError:
raise
except SCons.Errors.BuildError:
raise
except Exception, e:
buildError = SCons.Errors.convert_to_BuildError(e)
buildError.node = self.targets[0]
buildError.exc_info = sys.exc_info()
raise buildError
def executed_without_callbacks(self):
"""
Called when the task has been successfully executed
and the Taskmaster instance doesn't want to call
the Node's callback methods.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.executed_without_callbacks()',
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED)
def executed_with_callbacks(self):
"""
Called when the task has been successfully executed and
the Taskmaster instance wants to call the Node's callback
methods.
This may have been a do-nothing operation (to preserve build
order), so we must check the node's state before deciding whether
it was "built", in which case we call the appropriate Node method.
In any event, we always call "visited()", which will handle any
post-visit actions that must take place regardless of whether
or not the target was an actual built target or a source Node.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.executed_with_callbacks()',
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED)
t.push_to_cache()
t.built()
t.visited()
executed = executed_with_callbacks
def failed(self):
"""
Default action when a task fails: stop the build.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
self.fail_stop()
def fail_stop(self):
"""
Explicit stop-the-build failure.
This sets failure status on the target nodes and all of
their dependent parent nodes.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
T = self.tm.trace
        if T: T.write(self.trace_message('Task.fail_stop()', self.node))
# Invoke will_not_build() to clean-up the pending children
# list.
self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
# Tell the taskmaster to not start any new tasks
self.tm.stop()
# We're stopping because of a build failure, but give the
# calling Task class a chance to postprocess() the top-level
# target under which the build failure occurred.
self.targets = [self.tm.current_top]
self.top = 1
def fail_continue(self):
"""
Explicit continue-the-build failure.
This sets failure status on the target nodes and all of
their dependent parent nodes.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
T = self.tm.trace
        if T: T.write(self.trace_message('Task.fail_continue()', self.node))
self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
def make_ready_all(self):
"""
Marks all targets in a task ready for execution.
This is used when the interface needs every target Node to be
visited--the canonical example being the "scons -c" option.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
self.out_of_date = self.targets[:]
for t in self.targets:
t.disambiguate().set_state(NODE_EXECUTING)
for s in t.side_effects:
# add disambiguate here to mirror the call on targets above
s.disambiguate().set_state(NODE_EXECUTING)
def make_ready_current(self):
"""
Marks all targets in a task ready for execution if any target
is not current.
This is the default behavior for building only what's necessary.
"""
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.make_ready_current()',
self.node))
self.out_of_date = []
needs_executing = False
for t in self.targets:
try:
t.disambiguate().make_ready()
is_up_to_date = not t.has_builder() or \
(not t.always_build and t.is_up_to_date())
except EnvironmentError, e:
raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
if not is_up_to_date:
self.out_of_date.append(t)
needs_executing = True
if needs_executing:
for t in self.targets:
t.set_state(NODE_EXECUTING)
for s in t.side_effects:
# add disambiguate here to mirror the call on targets in first loop above
s.disambiguate().set_state(NODE_EXECUTING)
else:
for t in self.targets:
# We must invoke visited() to ensure that the node
# information has been computed before allowing the
# parent nodes to execute. (That could occur in a
# parallel build...)
t.visited()
t.set_state(NODE_UP_TO_DATE)
make_ready = make_ready_current
def postprocess(self):
"""
Post-processes a task after it's been executed.
This examines all the targets just built (or not, we don't care
if the build was successful, or even if there was no build
because everything was up-to-date) to see if they have any
waiting parent Nodes, or Nodes waiting on a common side effect,
that can be put back on the candidates list.
"""
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.postprocess()', self.node))
# We may have built multiple targets, some of which may have
# common parents waiting for this build. Count up how many
# targets each parent was waiting for so we can subtract the
# values later, and so we *don't* put waiting side-effect Nodes
# back on the candidates list if the Node is also a waiting
# parent.
targets = set(self.targets)
pending_children = self.tm.pending_children
parents = {}
for t in targets:
# A node can only be in the pending_children set if it has
# some waiting_parents.
if t.waiting_parents:
if T: T.write(self.trace_message(u'Task.postprocess()',
t,
'removing'))
pending_children.discard(t)
for p in t.waiting_parents:
parents[p] = parents.get(p, 0) + 1
for t in targets:
for s in t.side_effects:
if s.get_state() == NODE_EXECUTING:
s.set_state(NODE_NO_STATE)
for p in s.waiting_parents:
parents[p] = parents.get(p, 0) + 1
for p in s.waiting_s_e:
if p.ref_count == 0:
self.tm.candidates.append(p)
for p, subtract in parents.items():
p.ref_count = p.ref_count - subtract
if T: T.write(self.trace_message(u'Task.postprocess()',
p,
'adjusted parent ref count'))
if p.ref_count == 0:
self.tm.candidates.append(p)
for t in targets:
t.postprocess()
# Exception handling subsystem.
#
# Exceptions that occur while walking the DAG or examining Nodes
# must be raised, but must be raised at an appropriate time and in
# a controlled manner so we can, if necessary, recover gracefully,
# possibly write out signature information for Nodes we've updated,
# etc. This is done by having the Taskmaster tell us about the
    # exception, and letting the Task raise it at the appropriate time
    # via exception_set() and exception_raise() below.
def exc_info(self):
"""
Returns info about a recorded exception.
"""
return self.exception
def exc_clear(self):
"""
Clears any recorded exception.
This also changes the "exception_raise" attribute to point
to the appropriate do-nothing method.
"""
self.exception = (None, None, None)
self.exception_raise = self._no_exception_to_raise
def exception_set(self, exception=None):
"""
Records an exception to be raised at the appropriate time.
This also changes the "exception_raise" attribute to point
        to the method that will, in fact, raise the recorded exception.
"""
if not exception:
exception = sys.exc_info()
self.exception = exception
self.exception_raise = self._exception_raise
def _no_exception_to_raise(self):
pass
def _exception_raise(self):
"""
Raises a pending exception that was recorded while getting a
Task ready for execution.
"""
exc = self.exc_info()[:]
try:
exc_type, exc_value, exc_traceback = exc
except ValueError:
exc_type, exc_value = exc
exc_traceback = None
raise exc_type, exc_value, exc_traceback
class AlwaysTask(Task):
def needs_execute(self):
"""
Always returns True (indicating this Task should always
be executed).
Subclasses that need this behavior (as opposed to the default
of only executing Nodes that are out of date w.r.t. their
dependencies) can use this as follows:
class MyTaskSubclass(SCons.Taskmaster.Task):
                needs_execute = SCons.Taskmaster.AlwaysTask.needs_execute
"""
return True
class OutOfDateTask(Task):
def needs_execute(self):
"""
Returns True (indicating this Task should be executed) if this
Task's target state indicates it needs executing, which has
already been determined by an earlier up-to-date check.
"""
return self.targets[0].get_state() == SCons.Node.executing
def find_cycle(stack, visited):
if stack[-1] in visited:
return None
visited.add(stack[-1])
for n in stack[-1].waiting_parents:
stack.append(n)
if stack[0] == stack[-1]:
return stack
if find_cycle(stack, visited):
return stack
stack.pop()
return None
class Taskmaster(object):
"""
The Taskmaster for walking the dependency DAG.
"""
def __init__(self, targets=[], tasker=None, order=None, trace=None):
self.original_top = targets
self.top_targets_left = targets[:]
self.top_targets_left.reverse()
self.candidates = []
if tasker is None:
tasker = OutOfDateTask
self.tasker = tasker
if not order:
order = lambda l: l
self.order = order
self.message = None
self.trace = trace
self.next_candidate = self.find_next_candidate
self.pending_children = set()
def find_next_candidate(self):
"""
Returns the next candidate Node for (potential) evaluation.
The candidate list (really a stack) initially consists of all of
the top-level (command line) targets provided when the Taskmaster
was initialized. While we walk the DAG, visiting Nodes, all the
children that haven't finished processing get pushed on to the
candidate list. Each child can then be popped and examined in
turn for whether *their* children are all up-to-date, in which
case a Task will be created for their actual evaluation and
potential building.
Here is where we also allow candidate Nodes to alter the list of
Nodes that should be examined. This is used, for example, when
invoking SCons in a source directory. A source directory Node can
return its corresponding build directory Node, essentially saying,
"Hey, you really need to build this thing over here instead."
"""
try:
return self.candidates.pop()
except IndexError:
pass
try:
node = self.top_targets_left.pop()
except IndexError:
return None
self.current_top = node
alt, message = node.alter_targets()
if alt:
self.message = message
self.candidates.append(node)
self.candidates.extend(self.order(alt))
node = self.candidates.pop()
return node
def no_next_candidate(self):
"""
Stops Taskmaster processing by not returning a next candidate.
Note that we have to clean-up the Taskmaster candidate list
because the cycle detection depends on the fact all nodes have
been processed somehow.
"""
while self.candidates:
candidates = self.candidates
self.candidates = []
self.will_not_build(candidates)
return None
def _validate_pending_children(self):
"""
Validate the content of the pending_children set. Assert if an
internal error is found.
This function is used strictly for debugging the taskmaster by
checking that no invariants are violated. It is not used in
normal operation.
The pending_children set is used to detect cycles in the
dependency graph. We call a "pending child" a child that is
found in the "pending" state when checking the dependencies of
its parent node.
A pending child can occur when the Taskmaster completes a loop
        through a cycle. For example, let's imagine a graph made of
        three nodes (A, B and C) forming a cycle. The evaluation starts
        at node A. The Taskmaster first considers whether node A's
child B is up-to-date. Then, recursively, node B needs to
check whether node C is up-to-date. This leaves us with a
dependency graph looking like:
Next candidate \
\
Node A (Pending) --> Node B(Pending) --> Node C (NoState)
^ |
| |
+-------------------------------------+
Now, when the Taskmaster examines the Node C's child Node A,
it finds that Node A is in the "pending" state. Therefore,
Node A is a pending child of node C.
        Pending children indicate that the Taskmaster has potentially
        looped back through a cycle. We say potentially because it could
also occur when a DAG is evaluated in parallel. For example,
consider the following graph:
Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ...
| ^
| |
+----------> Node D (NoState) --------+
/
Next candidate /
The Taskmaster first evaluates the nodes A, B, and C and
starts building some children of node C. Assuming, that the
maximum parallel level has not been reached, the Taskmaster
will examine Node D. It will find that Node C is a pending
child of Node D.
In summary, evaluating a graph with a cycle will always
involve a pending child at one point. A pending child might
indicate either a cycle or a diamond-shaped DAG. Only a
        fraction of the nodes ends up being a "pending child" of
another node. This keeps the pending_children set small in
practice.
We can differentiate between the two cases if we wait until
the end of the build. At this point, all the pending children
nodes due to a diamond-shaped DAG will have been properly
built (or will have failed to build). But, the pending
children involved in a cycle will still be in the pending
state.
The taskmaster removes nodes from the pending_children set as
soon as a pending_children node moves out of the pending
state. This also helps to keep the pending_children set small.
"""
for n in self.pending_children:
assert n.state in (NODE_PENDING, NODE_EXECUTING), \
(str(n), StateString[n.state])
assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
for p in n.waiting_parents:
assert p.ref_count > 0, (str(n), str(p), p.ref_count)
def trace_message(self, message):
return 'Taskmaster: %s\n' % message
def trace_node(self, node):
return '<%-10s %-3s %s>' % (StateString[node.get_state()],
node.ref_count,
repr(str(node)))
def _find_next_ready_node(self):
"""
Finds the next node that is ready to be built.
This is *the* main guts of the DAG walk. We loop through the
list of candidates, looking for something that has no un-built
children (i.e., that is a leaf Node or has dependencies that are
all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
(both the target Node itself and its sources, which are always
scanned in the context of a given target) to discover implicit
dependencies. A Node that must wait for some children to be
built will be put back on the candidates list after the children
have finished building. A Node that has been put back on the
candidates list in this way may have itself (or its sources)
re-scanned, in order to handle generated header files (e.g.) and
the implicit dependencies therein.
Note that this method does not do any signature calculation or
up-to-date check itself. All of that is handled by the Task
class. This is purely concerned with the dependency graph walk.
"""
self.ready_exc = None
T = self.trace
if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))
while True:
node = self.next_candidate()
if node is None:
if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
return None
node = node.disambiguate()
state = node.get_state()
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
if CollectStats:
if not hasattr(node, 'stats'):
node.stats = Stats()
StatsNodes.append(node)
S = node.stats
S.considered = S.considered + 1
else:
S = None
if T: T.write(self.trace_message(u' Considering node %s and its children:' % self.trace_node(node)))
if state == NODE_NO_STATE:
# Mark this node as being on the execution stack:
node.set_state(NODE_PENDING)
elif state > NODE_PENDING:
# Skip this node if it has already been evaluated:
if S: S.already_handled = S.already_handled + 1
if T: T.write(self.trace_message(u' already handled (executed)'))
continue
executor = node.get_executor()
try:
children = executor.get_all_children()
except SystemExit:
exc_value = sys.exc_info()[1]
e = SCons.Errors.ExplicitExit(node, exc_value.code)
self.ready_exc = (SCons.Errors.ExplicitExit, e)
if T: T.write(self.trace_message(' SystemExit'))
return node
except Exception, e:
# We had a problem just trying to figure out the
# children (like a child couldn't be linked in to a
# VariantDir, or a Scanner threw something). Arrange to
# raise the exception when the Task is "executed."
self.ready_exc = sys.exc_info()
if S: S.problem = S.problem + 1
if T: T.write(self.trace_message(' exception %s while scanning children.\n' % e))
return node
children_not_visited = []
children_pending = set()
children_not_ready = []
children_failed = False
for child in chain(executor.get_all_prerequisites(), children):
childstate = child.get_state()
if T: T.write(self.trace_message(u' ' + self.trace_node(child)))
if childstate == NODE_NO_STATE:
children_not_visited.append(child)
elif childstate == NODE_PENDING:
children_pending.add(child)
elif childstate == NODE_FAILED:
children_failed = True
if childstate <= NODE_EXECUTING:
children_not_ready.append(child)
# These nodes have not even been visited yet. Add
# them to the list so that on some next pass we can
# take a stab at evaluating them (or their children).
children_not_visited.reverse()
self.candidates.extend(self.order(children_not_visited))
#if T and children_not_visited:
# T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited)))
# T.write(self.trace_message(' candidates now: %s\n' % map(str, self.candidates)))
# Skip this node if any of its children have failed.
#
# This catches the case where we're descending a top-level
# target and one of our children failed while trying to be
# built by a *previous* descent of an earlier top-level
# target.
#
# It can also occur if a node is reused in multiple
            # targets. The first descent goes through one of the
            # targets; the next descent occurs through the other target.
#
# Note that we can only have failed_children if the
# --keep-going flag was used, because without it the build
            # will stop before diving into the other branch.
#
# Note that even if one of the children fails, we still
# added the other children to the list of candidate nodes
# to keep on building (--keep-going).
if children_failed:
for n in executor.get_action_targets():
n.set_state(NODE_FAILED)
if S: S.child_failed = S.child_failed + 1
if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
continue
if children_not_ready:
for child in children_not_ready:
# We're waiting on one or more derived targets
# that have not yet finished building.
if S: S.not_built = S.not_built + 1
# Add this node to the waiting parents lists of
# anything we're waiting on, with a reference
# count so we can be put back on the list for
# re-evaluation when they've all finished.
node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
if T: T.write(self.trace_message(u' adjusted ref count: %s, child %s' %
(self.trace_node(node), repr(str(child)))))
if T:
for pc in children_pending:
T.write(self.trace_message(' adding %s to the pending children set\n' %
self.trace_node(pc)))
self.pending_children = self.pending_children | children_pending
continue
# Skip this node if it has side-effects that are
# currently being built:
wait_side_effects = False
for se in executor.get_action_side_effects():
if se.get_state() == NODE_EXECUTING:
se.add_to_waiting_s_e(node)
wait_side_effects = True
if wait_side_effects:
if S: S.side_effects = S.side_effects + 1
continue
# The default when we've gotten through all of the checks above:
# this node is ready to be built.
if S: S.build = S.build + 1
if T: T.write(self.trace_message(u'Evaluating %s\n' %
self.trace_node(node)))
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
return node
return None
def next_task(self):
"""
Returns the next task to be executed.
This simply asks for the next Node to be evaluated, and then wraps
it in the specific Task subclass with which we were initialized.
"""
node = self._find_next_ready_node()
if node is None:
return None
tlist = node.get_executor().get_all_targets()
task = self.tasker(self, tlist, node in self.original_top, node)
try:
task.make_ready()
except:
# We had a problem just trying to get this task ready (like
# a child couldn't be linked in to a VariantDir when deciding
# whether this node is current). Arrange to raise the
# exception when the Task is "executed."
self.ready_exc = sys.exc_info()
if self.ready_exc:
task.exception_set(self.ready_exc)
self.ready_exc = None
return task
def will_not_build(self, nodes, node_func=lambda n: None):
"""
Perform clean-up about nodes that will never be built. Invokes
a user defined function on all of these nodes (including all
of their parents).
"""
T = self.trace
pending_children = self.pending_children
to_visit = set(nodes)
pending_children = pending_children - to_visit
if T:
for n in nodes:
T.write(self.trace_message(' removing node %s from the pending children set\n' %
self.trace_node(n)))
try:
while len(to_visit):
node = to_visit.pop()
node_func(node)
# Prune recursion by flushing the waiting children
# list immediately.
parents = node.waiting_parents
node.waiting_parents = set()
to_visit = to_visit | parents
pending_children = pending_children - parents
for p in parents:
p.ref_count = p.ref_count - 1
if T: T.write(self.trace_message(' removing parent %s from the pending children set\n' %
self.trace_node(p)))
except KeyError:
# The container to_visit has been emptied.
pass
        # We have to stick the pending_children set back into the
        # taskmaster because Python 1.5.2 compatibility does not
        # allow us to use in-place updates.
self.pending_children = pending_children
def stop(self):
"""
Stops the current build completely.
"""
self.next_candidate = self.no_next_candidate
def cleanup(self):
"""
Check for dependency cycles.
"""
if not self.pending_children:
return
nclist = [(n, find_cycle([n], set())) for n in self.pending_children]
genuine_cycles = [
node for node,cycle in nclist
if cycle or node.get_state() != NODE_EXECUTED
]
if not genuine_cycles:
# All of the "cycles" found were single nodes in EXECUTED state,
# which is to say, they really weren't cycles. Just return.
return
desc = 'Found dependency cycle(s):\n'
for node, cycle in nclist:
if cycle:
desc = desc + " " + " -> ".join(map(str, cycle)) + "\n"
else:
desc = desc + \
" Internal Error: no cycle found for node %s (%s) in state %s\n" % \
(node, repr(node), StateString[node.get_state()])
raise SCons.Errors.UserError(desc)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| xifle/greensc | tools/scons/scons-local-2.0.1/SCons/Taskmaster.py | Python | gpl-3.0 | 39,297 | ["VisIt"] | 86b08e20852f5ae82ed530927dacde5f9dda349d95d765892bdaf0433ad54696 |
# coding: utf-8
from __future__ import division, unicode_literals
"""
This module define the various drones used to assimilate data.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 18, 2012"
import abc
import os
import re
import glob
import logging
import fnmatch
import json
import six
from six.moves import zip
from monty.io import zopen
from pymatgen.io.vaspio.vasp_input import Incar, Potcar, Poscar
from pymatgen.io.vaspio.vasp_output import Vasprun, Oszicar
from pymatgen.io.gaussianio import GaussianOutput
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from pymatgen.serializers.json_coders import PMGSONable
logger = logging.getLogger(__name__)
class AbstractDrone(six.with_metaclass(abc.ABCMeta, PMGSONable)):
"""
Abstract drone class that defines the various methods that must be
    implemented by drones. Because of the quirky nature of Python's
    multiprocessing, the intermediate data representations have to be in the
    form of Python primitives. So all objects that drones work with must be
PMGSONable. All drones must also implement the standard PMGSONable as_dict() and
from_dict API.
"""
@abc.abstractmethod
def assimilate(self, path):
"""
Assimilate data in a directory path into a pymatgen object. Because of
the quirky nature of Python"s multiprocessing, the object must support
pymatgen"s as_dict() for parallel processing.
Args:
path: directory path
Returns:
An assimilated object
"""
return
@abc.abstractmethod
def get_valid_paths(self, path):
"""
Checks if path contains valid data for assimilation, and then returns
the valid paths. The paths returned can be a list of directory or file
paths, depending on what kind of data you are assimilating. For
example, if you are assimilating VASP runs, you are only interested in
directories containing vasprun.xml files. On the other hand, if you are
        interested in converting all POSCARs in a directory tree to CIFs, for
        example, you will want the file paths.
Args:
path: input path as a tuple generated from os.walk, i.e.,
(parent, subdirs, files).
Returns:
List of valid dir/file paths for assimilation
"""
return
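# Typical use of a concrete drone (a sketch; assumes the companion BorgQueen
# class from pymatgen.apps.borg.queen and a hypothetical root directory):
#
#     from pymatgen.apps.borg.queen import BorgQueen
#     drone = VaspToComputedEntryDrone(inc_structure=True)
#     queen = BorgQueen(drone, rootpath="my_calcs", number_of_drones=4)
#     entries = queen.get_data()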
class VaspToComputedEntryDrone(AbstractDrone):
"""
    VaspToComputedEntryDrone assimilates directories containing VASP output to
ComputedEntry/ComputedStructureEntry objects. There are some restrictions
on the valid directory structures:
1. There can be only one vasp run in each directory.
2. Directories designated "relax1", "relax2" are considered to be 2 parts
of an aflow style run, and only "relax2" is parsed.
3. The drone parses only the vasprun.xml file.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries.
parameters (list): Input parameters to include. It has to be one of
the properties supported by the Vasprun object. See
:class:`pymatgen.io.vaspio.Vasprun`. If parameters == None,
a default set of parameters that are necessary for typical
post-processing will be set.
data (list): Output data to include. Has to be one of the properties
supported by the Vasprun object.
"""
def __init__(self, inc_structure=False, parameters=None, data=None):
self._inc_structure = inc_structure
self._parameters = {"is_hubbard", "hubbards", "potcar_symbols",
"run_type"}
if parameters:
self._parameters.update(parameters)
self._data = data if data else []
def assimilate(self, path):
files = os.listdir(path)
if "relax1" in files and "relax2" in files:
filepath = glob.glob(os.path.join(path, "relax2",
"vasprun.xml*"))[0]
else:
vasprun_files = glob.glob(os.path.join(path, "vasprun.xml*"))
filepath = None
if len(vasprun_files) == 1:
filepath = vasprun_files[0]
elif len(vasprun_files) > 1:
"""
                This is a bit confusing, since there may be multiple steps. By
default, assimilate will try to find a file simply named
vasprun.xml, vasprun.xml.bz2, or vasprun.xml.gz. Failing which
it will try to get a relax2 from an aflow style run if
possible. Or else, a randomly chosen file containing
vasprun.xml is chosen.
"""
for fname in vasprun_files:
if os.path.basename(fname) in ["vasprun.xml",
"vasprun.xml.gz",
"vasprun.xml.bz2"]:
filepath = fname
break
if re.search("relax2", fname):
filepath = fname
break
filepath = fname
try:
vasprun = Vasprun(filepath)
except Exception as ex:
logger.debug("error in {}: {}".format(filepath, ex))
return None
entry = vasprun.get_computed_entry(self._inc_structure,
parameters=self._parameters,
data=self._data)
entry.parameters["history"] = _get_transformation_history(path)
return entry
def get_valid_paths(self, path):
(parent, subdirs, files) = path
if "relax1" in subdirs and "relax2" in subdirs:
return [parent]
if (not parent.endswith("/relax1")) and \
(not parent.endswith("/relax2")) and \
len(glob.glob(os.path.join(parent, "vasprun.xml*"))) > 0:
return [parent]
return []
def __str__(self):
return " VaspToComputedEntryDrone"
def as_dict(self):
return {"init_args": {"inc_structure": self._inc_structure,
"parameters": self._parameters,
"data": self._data},
"version": __version__,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return cls(**d["init_args"])
class SimpleVaspToComputedEntryDrone(VaspToComputedEntryDrone):
"""
A simpler VaspToComputedEntryDrone. Instead of parsing vasprun.xml, it
parses only the INCAR, POTCAR, OSZICAR and KPOINTS files, which are much
smaller and faster to parse. However, much fewer properties are available
compared to the standard VaspToComputedEntryDrone.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries. Structure will be parsed from the CONTCAR.
"""
def __init__(self, inc_structure=False):
self._inc_structure = inc_structure
self._parameters = {"is_hubbard", "hubbards", "potcar_symbols",
"run_type"}
def assimilate(self, path):
files = os.listdir(path)
try:
files_to_parse = {}
if "relax1" in files and "relax2" in files:
for filename in ("INCAR", "POTCAR", "POSCAR"):
search_str = os.path.join(path, "relax1", filename + "*")
files_to_parse[filename] = glob.glob(search_str)[0]
for filename in ("CONTCAR", "OSZICAR"):
search_str = os.path.join(path, "relax2", filename + "*")
files_to_parse[filename] = glob.glob(search_str)[-1]
else:
files_to_parse["INCAR"] = glob.glob(os.path.join(path,
"INCAR*"))[0]
files_to_parse["POTCAR"] = glob.glob(
os.path.join(path, "POTCAR*"))[-1]
for filename in ("CONTCAR", "OSZICAR", "POSCAR"):
files = glob.glob(os.path.join(path, filename + "*"))
if len(files) == 1:
files_to_parse[filename] = files[0]
elif len(files) > 1:
"""
                        This is a bit confusing, since there may be
multiple steps. By default, assimilate will try to find
a file simply named filename, filename.bz2, or
filename.gz. Failing which it will try to get a relax2
from a custodian double relaxation style run if
possible. Or else, a random file is chosen.
"""
for fname in files:
if fnmatch.fnmatch(os.path.basename(fname),
"{}(\.gz|\.bz2)*"
.format(filename)):
files_to_parse[filename] = fname
break
if fname == "POSCAR" and \
re.search("relax1", fname):
files_to_parse[filename] = fname
break
if (fname in ("CONTCAR", "OSZICAR") and
re.search("relax2", fname)):
files_to_parse[filename] = fname
break
files_to_parse[filename] = fname
poscar = Poscar.from_file(files_to_parse["POSCAR"])
contcar = Poscar.from_file(files_to_parse["CONTCAR"])
param = {}
incar = Incar.from_file(files_to_parse["INCAR"])
if "LDAUU" in incar:
param["hubbards"] = dict(zip(poscar.site_symbols,
incar["LDAUU"]))
else:
param["hubbards"] = {}
param["is_hubbard"] = (incar.get("LDAU", False) and
sum(param["hubbards"].values()) > 0)
param["run_type"] = "GGA+U" if param["is_hubbard"] else "GGA"
param["history"] = _get_transformation_history(path)
potcar = Potcar.from_file(files_to_parse["POTCAR"])
param["potcar_symbols"] = potcar.symbols
oszicar = Oszicar(files_to_parse["OSZICAR"])
energy = oszicar.final_energy
structure = contcar.structure
initial_vol = poscar.structure.volume
final_vol = contcar.structure.volume
delta_volume = (final_vol / initial_vol - 1)
data = {"filename": path, "delta_volume": delta_volume}
if self._inc_structure:
entry = ComputedStructureEntry(structure, energy,
parameters=param,
data=data)
else:
entry = ComputedEntry(structure.composition, energy,
parameters=param, data=data)
return entry
except Exception as ex:
logger.debug("error in {}: {}".format(path, ex))
return None
def __str__(self):
return "SimpleVaspToComputedEntryDrone"
def as_dict(self):
return {"init_args": {"inc_structure": self._inc_structure},
"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return cls(**d["init_args"])
class GaussianToComputedEntryDrone(AbstractDrone):
"""
    GaussianToComputedEntryDrone assimilates directories containing Gaussian output to
ComputedEntry/ComputedStructureEntry objects. By default, it is assumed
that Gaussian output files have a ".log" extension.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries.
parameters (list): Input parameters to include. It has to be one of
the properties supported by the GaussianOutput object. See
:class:`pymatgen.io.gaussianio GaussianOutput`. The parameters
have to be one of python"s primitive types, i.e., list, dict of
strings and integers. If parameters == None, a default set of
parameters will be set.
data (list): Output data to include. Has to be one of the properties
supported by the GaussianOutput object. The parameters have to
            be one of python's primitive types, i.e. list, dict of strings
and integers. If data == None, a default set will be set.
file_extensions (list):
File extensions to be considered as Gaussian output files.
Defaults to just the typical "log" extension.
.. note::
Like the GaussianOutput class, this is still in early beta.
"""
def __init__(self, inc_structure=False, parameters=None, data=None,
file_extensions=(".log",)):
self._inc_structure = inc_structure
self._parameters = {"functional", "basis_set", "charge", "spin_mult",
"route"}
if parameters:
self._parameters.update(parameters)
self._data = {"stationary_type", "properly_terminated"}
if data:
self._data.update(data)
self._file_extensions = file_extensions
def assimilate(self, path):
try:
gaurun = GaussianOutput(path)
except Exception as ex:
logger.debug("error in {}: {}".format(path, ex))
return None
param = {}
for p in self._parameters:
param[p] = getattr(gaurun, p)
data = {}
for d in self._data:
data[d] = getattr(gaurun, d)
if self._inc_structure:
entry = ComputedStructureEntry(gaurun.final_structure,
gaurun.final_energy,
parameters=param,
data=data)
else:
entry = ComputedEntry(gaurun.final_structure.composition,
gaurun.final_energy, parameters=param,
data=data)
return entry
def get_valid_paths(self, path):
(parent, subdirs, files) = path
return [os.path.join(parent, f) for f in files
if os.path.splitext(f)[1] in self._file_extensions]
def __str__(self):
return " GaussianToComputedEntryDrone"
def as_dict(self):
return {"init_args": {"inc_structure": self._inc_structure,
"parameters": self._parameters,
"data": self._data,
"file_extensions": self._file_extensions},
"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return cls(**d["init_args"])
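# Minimal usage sketch (the path and walk are hypothetical):
#   drone = GaussianToComputedEntryDrone(inc_structure=True)
#   for task in os.walk("gaussian_runs"):
#       for p in drone.get_valid_paths(task):
#           entry = drone.assimilate(p)
#           if entry is not None:
#               print(entry.energy)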
def _get_transformation_history(path):
"""
Checks for a transformations.json* file and returns the history.
"""
trans_json = glob.glob(os.path.join(path, "transformations.json*"))
if trans_json:
try:
with zopen(trans_json[0]) as f:
return json.load(f)["history"]
        except Exception:
return None
return None
|
yanikou19/pymatgen
|
pymatgen/apps/borg/hive.py
|
Python
|
mit
| 16,065
|
[
"Gaussian",
"VASP",
"pymatgen"
] |
4fae4580993f57322633d730fdbb36cf6d5ac21bce539e1e138466e6bdcf8c4a
|
# ksp-compiler - a compiler for the Kontakt script language
# Copyright (C) 2011 Nils Liberg
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version:
# http://www.gnu.org/licenses/gpl-2.0.html
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from ksp_ast import *
from ksp_ast_processing import ASTVisitor, ASTModifier, flatten
import ksp_builtins
import re
import math
from decimal import Decimal  # used by assert_numeric and evaluate_expression
symbol_table = {}
nckp_table = []
user_defined_functions = {}
key_ids = {}
pgs_functions = set(['_pgs_create_key', '_pgs_key_exists', '_pgs_set_key_val', '_pgs_get_key_val',
'pgs_create_key', 'pgs_key_exists', 'pgs_set_key_val', 'pgs_get_key_val',
'pgs_create_str_key', 'pgs_str_key_exists', 'pgs_set_str_key_val', 'pgs_get_str_key_val'])
mark_constant_re = re.compile(r'MARK_\d+')
def clear_symbol_table():
symbol_table.clear()
key_ids.clear()
user_defined_functions.clear()
def add_nckp_var_to_nckp_table(nckp_ui_variable):
nckp_table.append(nckp_ui_variable.lower())
class ValueUndefinedException(ParseException):
def __init__(self, node, msg='Value of variable undefined'):
ParseException.__init__(self, node, msg)
class Variable:
def __init__(self, name, size=1, params=None, control_type=None, is_constant=False, is_polyphonic=False, value=None):
self.name = name
self.size = size
self.params = params or []
self.control_type = control_type
self.is_constant = is_constant
self.is_polyphonic = is_polyphonic
self.value = value
def move_on_init_first(module):
on_init_blocks = [b for b in module.blocks if isinstance(b, Callback) and b.name == 'init']
if on_init_blocks:
on_init = on_init_blocks[0]
module.blocks.remove(on_init)
module.blocks.insert(0, on_init)
def toint(i, bits=32):
    ''' converts i to a signed integer of the given bit width (two's complement) '''
i &= (1 << bits) - 1 # get last "bits" bits, as unsigned
if i & (1 << (bits - 1)): # is negative in N-bit 2's comp
i -= 1 << bits # ... so make it negative
return int(i)
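# Worked examples of the 32-bit two's-complement wrap-around done by toint:
#   toint(5)          ->  5
#   toint(0xFFFFFFFF) -> -1
#   toint(1 << 31)    -> -2147483648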
def sign(x):
if x < 0:
return -1
elif x > 0:
return 1
else:
return 0
def assert_numeric(x):
if type(x) not in (int, Decimal):
raise ValueUndefinedException(x, 'Numeric value expected.')
def normalize_numeric(x):
# constrain integers to the range of a 32-bit signed int
if type(x) is int:
return toint(x)
# ... and leave real numbers as they are
else:
return x
def evaluate_expression(expr):
if isinstance(expr, BinOp):
# TODO: handle Decimal numbers here:
a, b = evaluate_expression(expr.left), evaluate_expression(expr.right)
op = expr.op
if op in ['+', '-', '*', '/', '<', '<=', '>', '>=', '=', '#']:
#a, b = int(a), int(b)
assert_numeric(a)
assert_numeric(b)
if op == '+':
return normalize_numeric(a + b)
elif op == '-':
return normalize_numeric(a - b)
elif op == '*':
return normalize_numeric(a * b)
elif op == '/':
if type(a) is int and type(b) is int:
# division with truncation:
return int(math.copysign(abs(a) // abs(b), a / b)) # a // b yields the wrong result in case of negative numbers, eg. -10/9
else:
return a / b
elif op == '=':
# TODO: check if Kontakt treats 4.0 as equal to 4
return a == b
elif op == '<':
return a < b
elif op == '<=':
return a <= b
elif op == '>':
return a > b
elif op == '>=':
return a >= b
elif op == '#':
# TODO: check if Kontakt treats 4.0 as different than 4
return a != b
elif op in ['.and.', '.or.', 'mod']:
a, b = toint(a), toint(b)
if op == '.and.':
return a & b
elif op == '.or.':
return a | b
elif op == 'mod':
result = abs(a) % abs(b)
if a < 0:
return -result
else:
return result
elif op == '&':
return str(a) + str(b)
elif op in ['and', 'or']:
a, b = bool(a), bool(b)
#if type(a) is not bool:
# raise ParseException(expr.left, 'Boolean expected.')
#if type(b) is not bool:
# raise ParseException(expr.right, 'Boolean expected.')
if op == 'and':
return a and b
else:
return a or b
elif isinstance(expr, UnaryOp):
#a = int(evaluate_expression(expr.right))
a = evaluate_expression(expr.right)
if expr.op == '-':
return normalize_numeric(-a)
elif expr.op == '.not.':
return toint(0xFFFFFFFF ^ a)
elif isinstance(expr, Integer) or isinstance(expr, String) or isinstance(expr, Boolean) or isinstance(expr, Real):
return expr.value
elif isinstance(expr, VarRef):
name = str(expr.identifier)
if name.lower() not in symbol_table:
raise ParseException(expr, 'Variable not declared: %s' % name)
value = symbol_table[name.lower()].value
if value is None:
raise ValueUndefinedException(expr)
if len(expr.subscripts) > 1:
raise ParseException(expr, 'More than one subscript: %s' % str(expr))
if expr.subscripts:
subscript = int(evaluate_expression(expr.subscripts[0]))
else:
subscript = None
if (expr.identifier.prefix in '%!?') != (subscript is not None):
raise ParseException(expr, 'Use of subscript wrong.')
        if subscript is not None:
if 0 <= subscript < len(value):
return value[subscript]
else:
# WARNING: index out of bounds
return 0
else:
return value
elif isinstance(expr, FunctionCall):
name = str(expr.function_name)
parameters = [evaluate_expression(param) for param in expr.parameters]
funcs2numparameters = {'abs': 1, 'in_range': 3, 'sh_left': 2, 'sh_right': 2, 'by_marks': 1, 'int_to_real': 1, 'real_to_int': 1}
if name in list(funcs2numparameters.keys()):
if len(parameters) != funcs2numparameters[name]:
raise ParseException(expr, 'Wrong number of parameters to %s' % name)
if name == 'abs':
return abs(parameters[0])
elif name == 'in_range':
return parameters[1] <= parameters[0] <= parameters[2]
elif name == 'sh_left':
return toint(parameters[0] << (parameters[1] % 32))
elif name == 'sh_right':
return toint(parameters[0] >> (parameters[1] % 32))
elif name == 'by_marks': # TODO: check if this can be removed
return toint(parameters[0] | 0x80000000)
elif name == 'int_to_real':
return Decimal(toint(parameters[0]))
elif name == 'real_to_int':
return toint(int(parameters[0]))
raise ValueUndefinedException(expr, 'Constant value expected.')
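# Illustrative folding semantics of evaluate_expression (worked examples,
# assuming 32-bit signed KSP integers):
#   5 / 2          -> 2            (integer division truncates toward zero)
#   -10 / 9        -> -1           (hence the math.copysign workaround above)
#   -7 mod 3       -> -1           (result takes the sign of the left operand)
#   sh_left(1, 31) -> -2147483648  (wraps through toint)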
def assert_type(node, type):
''' verify that <node> has a type that matches (is compatible with) <type> '''
    if node is None:
        raise Exception('assert_type called with node=None')
node_type = node.type
if node_type != type and not (node_type in ('integer', 'real') and type == 'numeric'):
raise ParseException(node, 'Expected expression of %s type, got %s.' % (type, node_type))
def highest_precision(type1, type2):
if type1 == 'real' or type2 == 'real':
return 'real'
else:
return 'integer'
class ASTVisitorDetermineExpressionTypes(ASTVisitor):
def __init__(self, ast):
ASTVisitor.__init__(self)
self.traverse(ast)
def visitFunctionCall(self, parent, node, *args):
self.visit_children(parent, node, *args)
function_name = node.function_name.identifier
if function_name in ksp_builtins.function_signatures:
params, return_type = ksp_builtins.function_signatures[function_name]
            if return_type:
node.type = return_type
else:
node.type = 'undefined'
passed_params = node.parameters
if len(passed_params) != len(params):
raise ParseException(node, 'Wrong number of parameters for %s: expected %d, got %d' % (function_name, len(params), len(passed_params)))
for (param_descriptor, passed_param) in zip(params, passed_params):
param_descriptor = param_descriptor.replace('<', '').replace('>', '')
is_text = 'text' in param_descriptor or param_descriptor.endswith('name') or param_descriptor.endswith('-path')
if not is_text:
if function_name == 'abs' and passed_param.type in ('integer', 'real'):
# special case: the abs function returns an integer or real
# depending on what param type it's given
node.type = passed_param.type
elif 'array-or-string-array-variable' in param_descriptor:
pass
elif 'string-array' in param_descriptor:
assert_type(passed_param, 'array of string')
elif 'array-variable' in param_descriptor:
assert_type(passed_param, 'array of integer')
elif 'real-variable' in param_descriptor:
assert_type(passed_param, 'array of real')
elif 'key-id' in param_descriptor:
if not isinstance(passed_param, VarRef):
raise ParseException(node, 'Expected key-id.')
passed_param.type = 'key-id'
elif 'real-value' in param_descriptor:
assert_type(passed_param, 'real')
elif not 'variable' in param_descriptor:
assert_type(passed_param, 'integer')
return False
def visitBinOp(self, parent, expr, *args):
self.visit_children(parent, expr, *args)
#print expr.left.type, expr.op, expr.right.type
if expr.op == '&':
expr.type = 'string'
elif expr.op in ('+', '-', '*', '/'):
assert_type(expr.left, 'numeric')
assert_type(expr.right, 'numeric')
expr.type = highest_precision(expr.left.type, expr.right.type)
elif expr.op in ('mod', '.and.', '.or.'):
assert_type(expr.left, 'integer')
assert_type(expr.right, 'integer')
expr.type = 'integer'
elif expr.op in '< <= > >= = #':
assert_type(expr.left, 'numeric')
assert_type(expr.right, 'numeric')
expr.type = 'boolean'
elif expr.op in 'and or':
assert_type(expr.left, 'boolean')
assert_type(expr.right, 'boolean')
expr.type = 'boolean'
else:
            raise Exception('unknown binary operator: %s' % expr.op)
if expr.op in '+ - * / < <= > >= = #' and expr.left.type != expr.right.type:
raise ParseException(expr, 'Operands are of different types: %s and %s. Please use real_to_int(...) or int_to_real(...) functions to explicitly cast the type.' % (expr.left.type, expr.right.type))
return False
def visitUnaryOp(self, parent, expr, *args):
self.visit_children(parent, expr, *args)
if expr.op == '-':
assert_type(expr.right, 'numeric')
expr.type = expr.right.type
elif expr.op == '.not.':
assert_type(expr.right, 'integer')
expr.type = 'integer'
elif expr.op == 'not':
assert_type(expr.right, 'boolean')
expr.type = 'boolean'
return False
def visitInteger(self, parent, expr, *args):
expr.type = 'integer'
def visitReal(self, parent, expr, *args):
expr.type = 'real'
def visitString(self, parent, expr, *args):
expr.type = 'string'
def visitRawArrayInitializer(self, parent, expr, *args):
expr.type = 'array of integer'
def visitID(self, parent, expr, *args):
if expr.prefix:
expr.type = {'$': 'integer',
'%': 'array of integer',
'@': 'string',
'!': 'array of string',
'?': 'array of real',
'~': 'real'}[expr.prefix]
else:
expr.type = 'integer' # function return value
def visitVarRef(self, parent, expr, *args):
self.visit_children(parent, expr, *args)
if expr.subscripts:
assert_type(expr.subscripts[0], 'integer')
if not expr.identifier.type.startswith('array'):
raise ParseException(expr.identifier, 'Expected array')
# an added subscript turns eg. an array of integer into just an integer
expr.type = expr.identifier.type.replace('array of ', '')
else:
expr.type = expr.identifier.type
return False
class ASTVisitorCheckNoEmptyIfCaseStatements(ASTVisitor):
def __init__(self, ast):
ASTVisitor.__init__(self, visit_expressions=False)
self.traverse(ast)
def visitIfStmt(self, parent, node, *args):
(condition, stmts) = node.condition_stmts_tuples[0]
if len(stmts) == 0:
raise ParseException(node, "Warning: due to a KSP bug, an empty 'if' statement is equivalent to invoking the exit function. Please make sure the body of your 'if' statement is not empty!")
def visitSelectStmt(self, parent, node, *args):
for ((start, stop), stmts) in node.range_stmts_tuples:
if len(stmts) == 0:
raise ParseException(start, "Warning: due to a KSP bug, an empty 'case' statement is equivalent to invoking the exit function. Please make sure the body of your 'case' statement is not empty!")
class ASTVisitorCheckStatementExprTypes(ASTVisitor):
def __init__(self, ast):
ASTVisitor.__init__(self, visit_expressions=False)
self.traverse(ast)
def visitDeclareStmt(self, parent, node, *args):
if node.initial_value and not (type(node.initial_value) is list):
assert_type(node.initial_value, node.variable.type)
if node.size:
assert_type(node.size, 'integer')
def visitAssignStmt(self, parent, node, *args):
# assigning an integer to a string variable is ok, so don't treat that as an error
try:
if not (node.expression and node.expression.type in ('integer', 'real') and node.varref.type == 'string'):
assert_type(node.expression, node.varref.type)
except ParseException as e:
raise ParseException(node.varref, e.msg)
def visitWhileStmt(self, parent, node, *args):
assert_type(node.condition, 'boolean')
def visitForStmt(self, parent, node, *args):
assert_type(node.loopvar, 'numeric')
assert_type(node.start, 'numeric')
assert_type(node.end, 'numeric')
if node.step:
assert_type(node.step, 'numeric')
def visitIfStmt(self, parent, node, *args):
for (condition, stmts) in node.condition_stmts_tuples:
if condition:
assert_type(condition, 'boolean')
def visitSelectStmt(self, parent, node, *args):
for ((start, stop), stmts) in node.range_stmts_tuples:
# TODO: check if real numbers are allowed here
assert_type(start, 'integer')
if stop:
assert_type(stop, 'integer')
class ASTVisitorFindUsedVariables(ASTVisitor):
def __init__(self, ast, used_variables_set):
ASTVisitor.__init__(self)
self.used_variables = used_variables_set
self.traverse(ast)
def visitDeclareStmt(self, parent, node, *args):
for child in node.get_childnodes()[1:]: # visit all children but the first one (the declared variable)
self.dispatch(node, child, *args)
return False
def visitID(self, parent, node, *args):
self.used_variables.add(str(node).lower())
return False
class ASTVisitorFindUsedFunctions(ASTVisitor):
def __init__(self, ast, used_functions):
ASTVisitor.__init__(self, visit_expressions=False)
self.call_graph = {}
self.traverse(ast)
self.mark_used_functions_using_depth_first_traversal(self.call_graph, visited=used_functions)
def visitFunctionDef(self, parent, node):
self.visit_children(parent, node, node.name.identifier)
return False
def visitCallback(self, parent, node):
self.visit_children(parent, node, None)
return False
def visitFunctionCall(self, parent, node, top_level):
target = node.function_name.identifier
if node.using_call_keyword:
source = top_level
if not source in self.call_graph:
self.call_graph[source] = []
if not target in self.call_graph:
self.call_graph[target] = []
self.call_graph[source].append(target)
return False
def mark_used_functions_using_depth_first_traversal(self, call_graph, start_node=None, visited=None):
        ''' Make a depth-first traversal of the call graph and set the used attribute of functions invoked directly or indirectly from some callback.
        The graph is represented by a dictionary where f2 in graph[f1] means that the function named f1 calls the function named f2 (the names are strings).'''
if visited is None:
visited = set()
nodes_to_visit = set()
if start_node is None:
nodes_to_visit = set(call_graph.get(None, [])) # None represents the source of a normal callback (a callback invoking a function as opposed to a function invoking a function)
else:
if start_node not in visited:
visited.add(start_node)
nodes_to_visit = set([x for x in call_graph[start_node] if x is not None])
for n in nodes_to_visit:
self.mark_used_functions_using_depth_first_traversal(call_graph, n, visited)
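# Sketch of the call-graph convention consumed above (function names are
# hypothetical): {None: ['f'], 'f': ['g'], 'g': []} records that some
# callback invokes f via the "call" keyword and f in turn calls g, so the
# depth-first traversal marks both 'f' and 'g' as used.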
class ASTVisitorCheckDeclarations(ASTVisitor):
def __init__(self, ast):
ASTVisitor.__init__(self)
self.traverse(ast)
def assert_true(self, condition, node, msg):
if not condition:
raise ParseException(node, msg)
def visitFunctionCall(self, parent, node, *args):
function_name = node.function_name.identifier
if function_name in pgs_functions:
for child in node.get_childnodes()[1:]: # visit all children but the first one (the key-id)
self.dispatch(node, child, *args)
return False
def visitFunctionDef(self, parent, node, *args):
if node.name.identifier in user_defined_functions:
raise ParseException(node, 'There is already a variable/function defined with the same name')
user_defined_functions[node.name.identifier] = node
return True
def visitDeclareStmt(self, parent, node, *args):
name = str(node.variable)
is_ui_control = [x for x in node.modifiers if x.startswith('ui_')]
if is_ui_control:
self.assert_true(not 'const' in node.modifiers, node, 'A UI control cannot be constant')
self.assert_true(not 'polyphonic' in node.modifiers, node, 'A UI control cannot be polyphonic')
if 'ui_label' in node.modifiers:
self.assert_true(node.parameters and len(node.parameters) == 2, node, 'Expected two parameters')
elif 'ui_button' in node.modifiers or 'ui_menu' in node.modifiers or 'ui_switch' in node.modifiers:
self.assert_true(not node.parameters, node, "Syntax error, didn't expect any parameter")
elif 'ui_slider' in node.modifiers:
self.assert_true(node.parameters and len(node.parameters) == 2, node, 'Expected two parameters: min, max')
elif 'ui_knob' in node.modifiers or 'ui_value_edit' in node.modifiers:
self.assert_true(node.parameters and len(node.parameters) == 3, node, 'Expected three parameters: min, max, scale')
elif 'ui_table' in node.modifiers:
self.assert_true(node.parameters and len(node.parameters) == 3, node, 'Expected three parameters: width, height, max')
elif 'ui_waveform' in node.modifiers:
self.assert_true(node.parameters and len(node.parameters) == 2, node, 'Expected two parameters: width, height')
if name.lower() in symbol_table:
raise ParseException(node.variable, 'Redeclaration of %s' % name)
if node.size:
try:
size = evaluate_expression(node.size)
except ValueUndefinedException:
raise ParseException(node.size, 'Array size is non-constant or uses undefined variables')
else:
size = 1
initial_value = None
if 'const' in node.modifiers:
# First need to check if the initial value is an NI constant
init_expr = node.initial_value
if not (isinstance(init_expr, VarRef) and str(init_expr.identifier).upper() in ksp_builtins.variables):
if not node.initial_value:
raise ParseException(node.variable, 'A constant value has to be assigned to the constant')
try:
initial_value = evaluate_expression(node.initial_value)
except ValueUndefinedException:
raise ParseException(node.initial_value, 'Expression uses non-constant values or undefined constant variables')
try:
params = []
for param in node.parameters:
                # VALUE_EDIT_MODE_NOTE_NAMES may be used in declare statements; don't force it
                # to evaluate to a known value when it's used as a param. The leading "True or"
                # below keeps every parameter unevaluated, not just these special cases.
if True or isinstance(param, VarRef) and (param.identifier.prefix+param.identifier.identifier in ['VALUE_EDIT_MODE_NOTE_NAMES', '$VALUE_EDIT_MODE_NOTE_NAMES']
or param.identifier.prefix+param.identifier.identifier in ksp_builtins.variables):
params.append(param)
else:
params.append(evaluate_expression(param))
except ValueUndefinedException:
raise ParseException(node, 'Expression uses non-constant values or undefined constant variables')
#name, size=1, params=None, control_type=None, is_constant=False, value=None):
if is_ui_control:
control_type = is_ui_control[0]
else:
control_type = None
is_constant = ('const' in node.modifiers and initial_value is not None)
is_polyphonic = 'polyphonic' in node.modifiers
symbol_table[name.lower()] = Variable(node.variable, size, params, control_type, is_constant, is_polyphonic, initial_value)
self.visit_children(parent, node, *args)
return False
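    # Illustrative KSP declarations satisfying the UI-parameter checks above
    # (identifiers are hypothetical):
    #   declare ui_button $btn                  { no parameters }
    #   declare ui_label $lbl (1, 1)            { two parameters }
    #   declare ui_slider $vol (0, 100)         { min, max }
    #   declare ui_knob $cut (0, 1000, 1)       { min, max, scale }
    #   declare ui_table %tbl[8] (4, 2, 127)    { width, height, max }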
def visitID(self, parent, node, *args):
name = str(node)
special_names = ['NO_SYS_SCRIPT_RLS_TRIG', 'NO_SYS_SCRIPT_PEDAL', 'NO_SYS_SCRIPT_GROUP_START', 'NO_SYS_SCRIPT_ALL_NOTES_OFF']
if not name in ksp_builtins.variables and not name in ksp_builtins.functions and not name.lower() in symbol_table and not name in special_names and not name in user_defined_functions and not name.lower() in nckp_table:
raise ParseException(node, 'Undeclared variable/function: %s' % name)
class ASTModifierSimplifyExpressions(ASTModifier):
def __init__(self, module_ast, replace_constants=True):
ASTModifier.__init__(self)
self.replace_constants = replace_constants
self.traverse(module_ast)
def evaluate_expression_or_same(self, expr):
if expr is None:
return None
try:
result = evaluate_expression(expr)
if type(result) is int and not isinstance(expr, Integer):
return Integer(expr.lexinfo, result)
if type(result) is Decimal and not isinstance(expr, Real):
return Real(expr.lexinfo, result)
if type(result) is bool and not isinstance(expr, Boolean):
return Boolean(expr.lexinfo, result)
except SyntaxError:
pass
return expr
def modifyDeclareStmt(self, node):
ASTModifier.modifyDeclareStmt(self, node)
return [node]
# The below code seemed to be intended to clear out const declare statements for replacement purposes, but it is unnecessary.
# if 'const' in node.modifiers and self.replace_constants:
# return []
# else:
# return [node]
def modifyBinOp(self, node):
node = ASTModifier.modifyBinOp(self, node)
node.left = self.evaluate_expression_or_same(node.left)
node.right = self.evaluate_expression_or_same(node.right)
if node.op == '*':
if isinstance(node.left, (Integer, Real)):
if node.left.value == 0:
return node.left
elif node.left.value == 1:
return node.right
if isinstance(node.right, (Integer, Real)):
if node.right.value == 0:
return node.right
elif node.right.value == 1:
return node.left
if node.op == '+':
if isinstance(node.left, (Integer, Real)):
if node.left.value == 0:
return node.right
if isinstance(node.right, (Integer, Real)):
if node.right.value == 0:
return node.left
if node.op == 'or':
if isinstance(node.left, Boolean):
if node.left.value:
return node.left
else:
return node.right
elif isinstance(node.right, Boolean):
if node.right.value:
return node.right
else:
return node.left
elif node.op == 'and':
if isinstance(node.left, Boolean):
if node.left.value:
return node.right
else:
return node.left
elif isinstance(node.right, Boolean):
if node.right.value:
return node.left
else:
return node.right
return self.evaluate_expression_or_same(node)
def modifyUnaryOp(self, node):
node = ASTModifier.modifyUnaryOp(self, node)
node.right = self.evaluate_expression_or_same(node.right)
return self.evaluate_expression_or_same(node)
def modifyVarRef(self, node):
node = ASTModifier.modifyVarRef(self, node)
if self.replace_constants and not mark_constant_re.match(node.identifier.identifier): # MARK_%d constants are included in the symbol table in order to be possible to use on declaration lines, don't replace them with their values
return self.evaluate_expression_or_same(node)
else:
return node
def modifyExpr(self, node):
if isinstance(node, BinOp):
node.left = self.evaluate_expression_or_same(node.left)
node.right = self.evaluate_expression_or_same(node.right)
        elif isinstance(node, UnaryOp):
node.right = self.evaluate_expression_or_same(node.right)
return self.evaluate_expression_or_same(node)
def modifyIfStmt(self, node, *args, **kwargs):
# don't simplify the condition of "if 1=1" statements
temp = []
for i, (condition, stmts) in enumerate(node.condition_stmts_tuples):
if (isinstance(condition, BinOp) and
isinstance(condition.left, Integer) and
isinstance(condition.right, Integer) and
i == 0 and
condition.left.value == 1 and condition.right.value == 1):
pass
else:
condition = self.modify(condition, *args, **kwargs)
stmts = flatten([self.modify(s, *args, **kwargs) for s in stmts])
temp.append((condition, stmts))
if not temp:
return []
else:
node.condition_stmts_tuples = temp
return [node]
class ASTModifierRemoveUnusedBranches(ASTModifier):
def __init__(self, module_ast):
ASTModifier.__init__(self)
self.traverse(module_ast)
def is1equals1(self, node):
return isinstance(node, BinOp) and isinstance(node.left, Integer) and isinstance(node.right, Integer) and node.left.value == 1 and node.right.value == 1
def modifyIfStmt(self, node):
statements = ASTModifier.modifyIfStmt(self, node)
if len(statements) == 1:
node = statements[0]
condition_stmts_tuples = []
for (i, (condition, stmts)) in enumerate(node.condition_stmts_tuples):
try:
value = None
if condition:
value = evaluate_expression(condition)
except ParseException:
pass
if value is True:
# since the condition is always true it can be replaced with None,
# but don't do this for if 1=1 statements since they are used as a workaround for the Kontakt 2 parser buffer overflow
if not self.is1equals1(condition):
condition = None
if len(stmts) > 0:
condition_stmts_tuples.append((condition, stmts))
break
if not (value is False or len(stmts) == 0):
condition_stmts_tuples.append((condition, stmts))
# if there's just an else statement left, return its statement list
if len(condition_stmts_tuples) == 1 and condition_stmts_tuples[0][0] is None:
return condition_stmts_tuples[0][1]
elif len(condition_stmts_tuples) == 0:
return []
else:
node.condition_stmts_tuples = condition_stmts_tuples
return [node]
else:
return flatten([self.modify(stmt) for stmt in statements])
def modifySelectStmt(self, node):
statements = ASTModifier.modifySelectStmt(self, node)
if len(statements) == 1:
node = statements[0]
try:
value = evaluate_expression(node.expression)
if value is None:
return [node]
for ((start, stop), stmts) in node.range_stmts_tuples:
start = evaluate_expression(start)
stop = evaluate_expression(stop)
if (stop is not None and start <= value <= stop) or (start == value):
return stmts
except ParseException:
pass
return [node]
else:
return flatten([self.modify(stmt) for stmt in statements])
def modifyWhileStmt(self, node):
statements = ASTModifier.modifyWhileStmt(self, node)
if len(statements) == 1:
node = statements[0]
try:
value = evaluate_expression(node.condition)
if value is False:
return []
except ParseException:
pass
return [node]
else:
return flatten([self.modify(stmt) for stmt in statements])
class ASTModifierRemoveUnusedFunctions(ASTModifier):
def __init__(self, module_ast, used_functions):
ASTModifier.__init__(self, modify_expressions=False)
self.used_functions = used_functions
self.traverse(module_ast)
def modifyModule(self, node, *args, **kwargs):
''' only keep used functions '''
node.blocks = [b for b in node.blocks if isinstance(b, Callback) or b.name.identifier in self.used_functions]
class ASTModifierRemoveUnusedVariables(ASTModifier):
def __init__(self, module_ast, used_variables):
ASTModifier.__init__(self)
self.used_variables = used_variables
self.traverse(module_ast)
def modifyDeclareStmt(self, node):
''' only keep used variables '''
statements = ASTModifier.modifyDeclareStmt(self, node)
if len(statements) == 1:
node = statements[0]
is_ui_variable = node.modifiers is not None and any([m.lower().startswith('ui_') for m in node.modifiers])
if not str(node.variable).lower() in self.used_variables and not is_ui_variable:
return []
else:
return [node]
else:
return flatten([self.modify(stmt) for stmt in statements])
class ASTModifierFixCallbug(ASTModifier):
def __init__(self, module_ast, used_variables):
ASTModifier.__init__(self, modify_expressions=False)
self.dummy_assign = None # dummy assignment statement, eg. $i = $i (using some variable $i that it finds in the script)
self.pass_num = 1
self.traverse(module_ast)
self.pass_num = 2
self.traverse(module_ast)
def modifyDeclareStmt(self, node):
# try to find some variable to use in dummy assignment
if self.dummy_assign is None and self.pass_num == 1 and node.variable.type == 'integer' and 'const' not in node.modifiers:
lexinfo = node.lexinfo
varref = VarRef(lexinfo, node.variable)
self.dummy_assign = AssignStmt(lexinfo, varref, varref)
return [node]
def fixStatementList(self, statements):
# add a dummy assignment to the end of the list if the last element is a function call using "call"
result = statements[:]
if len(statements) > 0 and isinstance(statements[-1], FunctionCall) and statements[-1].using_call_keyword:
if self.dummy_assign:
result.append(self.dummy_assign)
else:
raise ParseException(statements[-1], 'The compiler needs to add a dummy assignment (eg. $x := $x) after this line, but could not find any integer variables to use for this purpose. Please declare one.')
return result
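    # Sketch of the resulting work-around in KSP terms (identifiers are
    # hypothetical):
    #   call my_function   { last statement of a branch }
    #   $i := $i           { dummy assignment appended after it }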
def modifyIfStmt(self, node):
if self.pass_num == 2:
node = ASTModifier.modifyIfStmt(self, node)[0]
node.condition_stmts_tuples = [(condition, self.fixStatementList(stmts)) for (condition, stmts) in node.condition_stmts_tuples]
return [node]
def modifyWhileStmt(self, node):
if self.pass_num == 2:
node = ASTModifier.modifyWhileStmt(self, node)[0]
node.statements = self.fixStatementList(node.statements)
return [node]
def modifySelectStmt(self, node):
if self.pass_num == 2:
node = ASTModifier.modifySelectStmt(self, node)[0]
node.range_stmts_tuples = [(range, self.fixStatementList(statements)) for (range, statements) in node.range_stmts_tuples]
return [node]
def modifyFunctionDef(self, node):
if self.pass_num == 2:
node = ASTModifier.modifyFunctionDef(self, node)
node.lines = self.fixStatementList(node.lines)
return node
def check_code(module, optimize=False, check_empty_compound_statements=False, call_bug_work_around=True):
clear_symbol_table()
used_variables = set()
if optimize:
rescaler = 1/0.60
else:
rescaler = 1.0
yield ('progress', 'checking types', int(40*rescaler))
ASTVisitorDetermineExpressionTypes(module)
yield ('progress', 'checking types', int(50*rescaler))
ASTVisitorCheckStatementExprTypes(module)
yield ('progress', 'checking declarations', int(60*rescaler))
ASTVisitorCheckDeclarations(module)
##if call_bug_work_around:
## yield ('progress', 'automatically introducing work-around for call bug', int(68*rescaler))
## ASTModifierFixCallbug(module, used_variables)
if optimize:
        yield ('progress', 'optimizing - simplifying expressions', 70)
ASTModifierSimplifyExpressions(module, replace_constants=True)
yield ('progress', 'optimizing - removing unused code branches', 80)
ASTModifierRemoveUnusedBranches(module)
yield ('progress', 'optimizing - removing unused variables', 90)
ASTVisitorFindUsedVariables(module, used_variables)
yield ('progress', 'optimizing - removing unused variables', 95)
ASTModifierRemoveUnusedVariables(module, used_variables)
if check_empty_compound_statements:
        yield ('progress', 'checking for empty compound statements', 98)
ASTVisitorCheckNoEmptyIfCaseStatements(module)
yield ('completed', module)
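# Illustrative driver for the check_code generator above ("module" being a
# parsed AST produced elsewhere in the compiler):
#   for event in check_code(module, optimize=True):
#       if event[0] == 'progress':
#           _, message, percent = event
#       elif event[0] == 'completed':
#           _, checked_module = event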
|
nojanath/SublimeKSP
|
ksp_compiler3/ksp_compiler_extras.py
|
Python
|
gpl-3.0
| 37,664
|
[
"VisIt"
] |
4cce4b77397348a8c33ac567e45e9281a5ce3a689d1de72c652a9a748e188205
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals
from __future__ import absolute_import
"""
This module provides classes for the Piezoelectric tensor
"""
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.elasticity.tensors import TensorBase
from pymatgen.analysis.elasticity import voigt_map
import numpy as np
import warnings
from six.moves import range
__author__ = "Shyam Dwaraknath"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Development"
__date__ = "Feb, 2016"
class PiezoTensor(TensorBase):
"""
    This class describes the 3x3x3 piezo tensor (use from_voigt to build it from the 3x6 Voigt form)
"""
def __new__(cls, input_array, tol=1e-3):
"""
        Create a PiezoTensor object. The constructor throws an error if
        the shape of the input_array argument is not 3x3x3, i.e. in true
        tensor notation. Note that the constructor uses __new__ rather than
        __init__ according to the standard method of subclassing numpy
        ndarrays.
        Args:
            input_array (3x3x3 array-like): the 3x3x3 array-like
                representing the piezo tensor
"""
obj = TensorBase(input_array).view(cls)
if obj.shape != (3, 3, 3):
raise ValueError("Default piezo tensor constructor requires "
"input argument to be the true 3x3x3 "
"array. To construct from a 3x6 array, use "
"PiezoTensor.from_voigt")
if not (obj - np.transpose(obj, (0, 2, 1)) < tol).all():
warnings.warn("Input piezo tensor does "
"not satisfy standard symmetries")
return obj
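# Minimal construction sketch (the numbers are made up): PiezoTensor takes the
# full 3x3x3 tensor, while a 3x6 Voigt matrix should go through from_voigt.
#   full = np.zeros((3, 3, 3))
#   full[2, 0, 0] = full[2, 1, 1] = -0.15
#   piezo = PiezoTensor(full)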
|
aykol/pymatgen
|
pymatgen/analysis/piezo.py
|
Python
|
mit
| 1,957
|
[
"pymatgen"
] |
8b1a4dabb6f3904a696afd506e49fc96cce9d38f0ae69070a050b044a63b6244
|
#!/usr/bin/env python
import argparse
from datetime import datetime
from pathlib import Path
import os
import re
import fflogs
import encounter_tools as e_tools
def load_timeline(timeline):
"""Loads a timeline file into a list of entry dicts"""
timelist = []
with timeline as file:
for line in file:
# Ignore comments, alertall, hideall, etc by
# only reading lines starting with a number
if not line[0].isdigit():
continue
entry = {}
# Remove trailing comment, if any,
# then split the line into sections
cleaned_line = e_tools.clean_tl_line(line)
match = e_tools.split_tl_line(cleaned_line)
if not match:
continue
entry["time"] = float(match.group("time"))
entry["label"] = match.group("label")
# Get the sync format into the file format
sync_match = e_tools.is_tl_line_syncmatch(match)
if not sync_match:
continue
entry["branch"] = 0
ability_match = e_tools.is_tl_line_cast(sync_match.group(1))
if ability_match:
entry["regex"] = "2[12]\|[^\|]*\|........\|{}\|{}\|".format(
ability_match.group("source"), ability_match.group("id")
)
# Special casing on syncs
entry["special_type"] = False
begincast_match = e_tools.is_tl_line_begincast(sync_match.group(1))
if begincast_match:
entry["special_type"] = "begincast"
entry["special_line"] = "20"
entry["cast_id"] = begincast_match.group("id")
entry["caster_name"] = begincast_match.group("source")
entry["regex"] = "20\|[^\|]*\|........\|{}\|{}\|".format(
begincast_match.group("source"), begincast_match.group("id")
)
buff_match = e_tools.is_tl_line_buff(sync_match.group(1))
if buff_match:
entry["special_type"] = "applydebuff"
entry["special_line"] = "26"
entry["buff_target"] = buff_match.group("target")
entry["buff_name"] = buff_match.group("effect")
entry["regex"] = "26\|[^\|]*\|{}\|{}\|[^\|]*\|[^\|]*\|[^\|]*\|[^\|]*\|{}\|".format(
buff_match.group("effectId"),
buff_match.group("effect"),
buff_match.group("target"),
)
log_match = e_tools.is_tl_line_log(sync_match.group(1))
if log_match:
entry["special_type"] = "battlelog"
entry["special_line"] = "00"
entry["logid"] = log_match.group("id")
entry["line"] = log_match.group("message")
entry["regex"] = "00\|[^\|]*\|{}\|{}\|{}".format(
log_match.group("id"), log_match.group("entity"), log_match.group("message")
)
add_match = e_tools.is_tl_line_adds(sync_match.group(1))
if add_match:
entry["special_type"] = "addlog"
entry["special_line"] = "03"
entry["name"] = add_match.group("entity")
entry["regex"] = "03\|[^\|]*\|........\|{}".format(add_match.group("entity"))
headmarker_match = e_tools.is_tl_line_headmarker(sync_match.group(1))
if headmarker_match:
entry["special_type"] = "headmarker"
entry["special_line"] = "27"
entry["headmarker_target"] = headmarker_match.group("target")
entry["regex"] = "27\|[^\|]*\|........\|{}\|....\|....\|{}".format(
headmarker_match.group("target"), headmarker_match.group("id")
)
# If we're here and we're missing a regex type, just hope for the best
if "regex" not in entry:
entry["regex"] = sync_match.group(1).replace(":", "\|")
# Get the start and end of the sync window
window_match = re.search(r"window ([\d\.]+),?([\d\.]+)?", match.group("options"))
if window_match:
pre_window = float(window_match.group(1))
if window_match.group(2) is not None:
post_window = float(window_match.group(2))
else:
post_window = pre_window
else:
pre_window = 2.5
post_window = 2.5
entry["start"] = max(0, entry["time"] - pre_window)
entry["end"] = entry["time"] + post_window
# Get the jump time, if any
jump_match = re.search(r"jump ([\d\.]+)", match.group("options"))
if jump_match:
entry["jump"] = float(jump_match.group(1))
# Add completed entry to the timelist
timelist.append(entry)
return timelist
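# Hypothetical timeline line of the kind load_timeline parses (the exact sync
# syntax is defined by encounter_tools):
#   20.0 "Ultima" sync /:Ultima Weapon:5D1:/ window 5,5 jump 0
# which would yield roughly:
#   {"time": 20.0, "label": "Ultima", "start": 15.0, "end": 25.0, "jump": 0.0}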
def parse_report(args):
"""Reads an fflogs report and return a list of entries"""
# Default values
report_start_time = 0
start_time = 0
end_time = 0
enemies = {}
# Get report information
report_data = fflogs.api("fights", args.report, "www", {"api_key": args.key})
report_start_time = report_data["start"]
# Get the start and end timestamps for the specific fight
fight_id_found = False
for fight in report_data["fights"]:
if args.fight and fight["id"] == args.fight:
start_time = fight["start_time"]
end_time = fight["end_time"]
fight_id_found = True
break
elif fight["end_time"] - fight["start_time"] > end_time - start_time:
start_time = fight["start_time"]
end_time = fight["end_time"]
if args.fight and not fight_id_found:
raise Exception("Fight ID not found in report")
# Build an enemy name list, since these aren't in the events
for enemy in report_data["enemies"]:
enemies[enemy["id"]] = enemy["name"]
# Get the actual event list for the single fight
options = {
"api_key": args.key,
"start": start_time,
"end": end_time,
"filter": '(source.disposition="enemy" and (type="cast" or type="begincast")) or (target.disposition="enemy" and source.disposition!="friendly" and type="applydebuff")',
"translate": "true",
}
event_data = fflogs.api("events", args.report, "www", options)
entries = []
# Actually make the entry dicts
for event in event_data["events"]:
entry = {
"time": datetime.fromtimestamp((report_start_time + event["timestamp"]) / 1000),
"ability_id": hex(event["ability"]["guid"])[2:].upper(),
"ability_name": event["ability"]["name"],
"type": event["type"],
}
# In the applydebuff case, the source is -1 (environment) and we want the target instead
if event["type"] == "applydebuff":
entry["combatant"] = enemies[event["targetID"]]
elif "sourceID" in event:
entry["combatant"] = enemies[event["sourceID"]]
else:
entry["combatant"] = ""
entries.append(entry)
return entries, datetime.fromtimestamp((report_start_time + start_time) / 1000)
def get_regex(event):
"""Gets the regex for the event for both file and report types"""
if isinstance(event, str):
return event
elif isinstance(event, dict):
return event["regex"]
def get_type(event):
"""Gets the line type for both file and report types"""
if isinstance(event, str):
if event.startswith("20"):
return "begincast"
elif event.startswith("26"):
return "applydebuff"
elif event.startswith("21") or event.startswith("22"):
return "cast"
elif event.startswith("00"):
return "battlelog"
elif event.startswith("03"):
return "addlog"
elif event.startswith("27"):
return "headmarker"
else:
return "none"
elif isinstance(event, dict):
return event["type"]
# In case event is a different type
return "none"
def test_match(event, entry):
# Normal case. Exclude begincast to avoid false positive match with cast events
if (
"regex" in entry
and re.search(entry["regex"], get_regex(event))
and not entry["special_type"]
and get_type(event) != "begincast"
):
return True
# File parsing cases
if isinstance(event, str) and entry["special_type"]:
# Begincast case
if entry["special_type"] == "begincast" and event.startswith(entry["special_line"]):
begincast_match = re.search(entry["regex"], event)
if begincast_match:
return True
else:
return False
# Buff case
elif entry["special_type"] == "applydebuff" and event.startswith(entry["special_line"]):
# Matching this format generically:
# |Dadaluma Simulation|0.00|E0000000||4000AE96|Guardian
buff_match = re.search(entry["regex"], event, re.IGNORECASE)
if buff_match:
return True
else:
return False
# Battlelog case
elif entry["special_type"] == "battlelog" and event.startswith(entry["special_line"]):
# Matching this format generically:
# 00|2019-01-12T18:08:14.0000000-05:00|0839||The Realm of the Machinists will be sealed off in 15 seconds!|
log_match = re.search(entry["regex"], event, re.IGNORECASE,)
if log_match:
return True
else:
return False
# Added combatant case
elif entry["special_type"] == "addlog" and event.startswith(entry["special_line"]):
# Matching this format generically:
# 03|2019-01-12T18:07:46.6390000-05:00|40002269|Mustadio|0|46|dfa2|2ee0|0|0||dc029b852788abdd6056147620d2193c
add_match = re.search(entry["regex"], event)
if add_match:
return True
else:
return False
# Head marker case
elif entry["special_type"] == "headmarker" and event.startswith(entry["special_line"]):
marker_match = re.search(entry["regex"], event,)
if marker_match:
return True
else:
return False
# Report parsing cases
elif isinstance(event, dict) and entry["special_type"] == event["type"]:
# Begincast case
if event["type"] == "begincast":
if re.search(entry["cast_id"], event["ability_id"]) and re.search(
entry["caster_name"], event["combatant"]
):
return True
else:
return False
# Buff case
elif event["type"] == "applydebuff":
if re.search(entry["buff_target"], event["combatant"]) and re.search(
entry["buff_name"], event["ability_name"]
):
return True
else:
return False
# If none of the above have matched, there's no match
return False
def check_event(event, timelist, state):
# Get amount of time that's passed since last sync point
if state["timeline_stopped"]:
time_progress_seconds = 0
else:
event_time = e_tools.parse_event_time(event)
if event_time > state["last_sync_timestamp"]:
time_progress_delta = e_tools.parse_event_time(event) - state["last_sync_timestamp"]
time_progress_seconds = (
time_progress_delta.seconds + time_progress_delta.microseconds / 1000000
)
else:
# battle logs have out of order parsed times because their
# microseconds are zero. Just pretend this is 0.
time_progress_seconds = 0
# Get where the timeline would be at this time
timeline_position = state["last_sync_position"] + time_progress_seconds
# Search timelist for matches
for entry in timelist:
match = test_match(event, entry)
if match and timeline_position >= entry["start"] and timeline_position <= entry["end"]:
# Flag with current branch
if state["last_entry"] == entry and time_progress_seconds < 2.5:
continue
entry["branch"] = state["branch"]
state["last_entry"] = entry
            # Check the timeline drift for anomalous timings
drift = entry["time"] - timeline_position
entry_text = "{:.3f}: Matched entry: {} {} ({:+.3f}s)".format(
timeline_position, entry["time"], entry["label"], drift
)
if args.drift_max > abs(drift) > args.drift_failure:
print(e_tools.color_fail(entry_text))
elif args.drift_failure > abs(drift) > args.drift_warning:
print(e_tools.color_warn(entry_text))
else:
print(entry_text)
if time_progress_seconds > 30:
print(" Warning: {:.3f}s since last sync".format(time_progress_seconds))
# Find any syncs before this one that were passed without syncing
if not state["timeline_stopped"]:
for other_entry in timelist:
if (
"regex" in other_entry
and other_entry["time"] > state["last_jump"]
and other_entry["time"] < entry["time"]
and other_entry["branch"] < entry["branch"]
):
if "last" in other_entry and drift < 999:
print(
" Missed sync: {} at {} (last seen at {})".format(
other_entry["label"], other_entry["time"], other_entry["last"]
)
)
elif drift < 999:
print(
" Missed sync: {} at {}".format(
other_entry["label"], other_entry["time"]
)
)
# If this is a sync from a large window, ignore missed syncs
other_entry["branch"] = state["branch"]
# Carry out the sync to make this the new baseline position
if state["timeline_stopped"]:
state["last_jump"] = entry["time"]
state["timeline_stopped"] = False
state["last_sync_timestamp"] = e_tools.parse_event_time(event)
# Jump to new time, stopping if necessary
if "jump" in entry:
if entry["jump"] == 0:
print("---!Resetting encounter from {}!---".format(state["last_sync_position"]))
state["timeline_stopped"] = True
else:
print(" Jumping to {:.3f}".format(entry["jump"]))
state["last_jump"] = entry["jump"]
state["last_sync_position"] = entry["jump"]
state["branch"] += 1
else:
state["last_sync_position"] = entry["time"]
# Record last seen data if it matches but outside window
elif match:
entry["last"] = timeline_position
return state
def run_file(args, timelist):
"""Runs a timeline against a specified file"""
state = {
"file": True,
"last_entry": False,
"last_sync_position": 0,
"last_jump": 0,
"branch": 1,
"timeline_stopped": True,
}
started = False
encounter_sets = []
with args.file as file:
# If searching for encounters, divert and find start/end first.
if args.search_fights:
encounter_sets = e_tools.find_fights_in_file(file)
# If all we want to do is list encounters, stop here and give to the user.
if args.search_fights < 0:
return e_tools.list_fights_in_file(args, encounter_sets)
start_time, end_time = e_tools.choose_fight_times(args, encounter_sets)
# Scan the file until the start timestamp
for line in file:
if not started and line[14:26] != start_time:
continue
if line[14:26] == end_time:
break
# We're at the start of the encounter now.
if not started:
started = True
state["last_sync_timestamp"] = e_tools.parse_event_time(line)
state = check_event(line, timelist, state)
if not started:
raise Exception("Fight start not found")
def run_report(args, timelist):
"""Runs a timeline against a specified FFlogs report"""
# Reuse the parse_report functionality to get the entry list
events, start_time = parse_report(args)
# Add in the log string to search for
for event in events:
event["regex"] = "|{}|{}|".format(event["combatant"], event["ability_id"])
# Set up state. timeline_stopped will never be True with reports
state = {
"file": False,
"last_entry": False,
"last_sync_position": 0,
"last_sync_timestamp": start_time,
"last_jump": 0,
"branch": 1,
"timeline_stopped": False,
}
for event in events:
state = check_event(event, timelist, state)
def main(args):
if args.search_fights and args.search_fights == -1:
return run_file(args, None)
# Parse timeline file
timelist = load_timeline(args.timeline)
if args.file:
run_file(args, timelist)
elif args.report:
print("Running analysis based on report. Caveats apply.")
run_report(args, timelist)
def timeline_file(filename):
"""Defines the timeline file argument type"""
data_path = Path(__file__).resolve().parent.parent / "ui" / "raidboss" / "data"
# Allow for just specifying the base filename, e.g. "o12s.txt" or "o12s"
if not os.path.exists(filename):
for root, _, files in os.walk(data_path):
if filename in files:
filename = os.path.join(root, filename)
break
if "%s.txt" % filename in files:
filename = os.path.join(root, "%s.txt" % filename)
break
path = Path(filename)
if not path.exists():
raise argparse.ArgumentTypeError("Could not load timeline: %s" % filename)
else:
return path.open()
if __name__ == "__main__":
# Set up all of the arguments
example_usage = """
example:
    test_timeline.py -f "%APPDATA%\\Advanced Combat Tracker\\FFXIVLogs\\Network_20180206.log" -s 12:30:45.156 -e 12:43:51.395 -t ultima_weapon_ultimate
Scans Network_20180206.log, starts the encounter at 12:30:45.156, and crawls until
12:43:51.395, comparing against the ultima_weapon_ultimate timeline"""
parser = argparse.ArgumentParser(
description="Creates a timeline from a logged encounter",
epilog=example_usage,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
# Add main input vector, fflogs report or network log file
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-r", "--report", help="The ID of an FFLogs report")
group.add_argument(
"-f",
"--file",
type=argparse.FileType("r", encoding="utf8"),
help="The path of the log file",
)
# Report arguments
parser.add_argument(
"-k",
"--key",
help="The FFLogs API key to use, from https://www.fflogs.com/accounts/changeuser",
)
parser.add_argument(
"-rf",
"--fight",
type=int,
help="Fight ID of the report to use. Defaults to longest in the report",
)
# Log file arguments
parser.add_argument(
"-s",
"--start",
type=e_tools.timestamp_type,
help="Timestamp of the start, e.g. '12:34:56.789",
)
parser.add_argument(
"-e", "--end", type=e_tools.timestamp_type, help="Timestamp of the end, e.g. '12:34:56.789"
)
parser.add_argument(
"-lf",
"--search-fights",
nargs="?",
const=-1,
type=int,
help="Encounter in log to use, e.g. '1'. If no number is specified, returns a list of encounters.",
)
# Filtering arguments
parser.add_argument(
"-t",
"--timeline",
type=timeline_file,
help="The filename of the timeline to test against, e.g. ultima_weapon_ultimate",
)
# Output Format arguments
parser.add_argument(
"-df",
"--drift-failure",
nargs="?",
default=1,
type=float,
help="If an entry misses its timestamp by more than this value in seconds, it is displayed in red. Defaults to 1.",
)
parser.add_argument(
"-dw",
"--drift-warning",
nargs="?",
default=0.2,
type=float,
help="If an entry misses its timestamp by more than this value in seconds, it is displayed in yellow. Defaults to 0.2.",
)
parser.add_argument(
"-dm",
"--drift-max",
nargs="?",
default=10,
type=float,
help="If an entry misses its timestamp by more than this value in seconds, it is assumed to be a jump and will not be highlighted. Defaults to 10.",
)
args = parser.parse_args()
# Check dependent args
if args.search_fights and not args.file:
raise parser.error("Automatic encounter listing requires an input file")
if args.search_fights and args.search_fights > -1 and not args.timeline:
raise parser.error(
"You must specify a timeline file before testing against a specific encounter."
)
if args.file and not ((args.start and args.end) or args.search_fights):
raise parser.error("Log file input requires start and end timestamps")
if args.report and not args.key:
raise parser.error(
"FFlogs parsing requires an API key. Visit https://www.fflogs.com/profile and use the V1 Client Key"
)
# Actually call the script
if args.search_fights and args.search_fights == -1:
print("\n".join(main(args)))
else:
main(args)
|
quisquous/cactbot
|
util/test_timeline.py
|
Python
|
apache-2.0
| 22,631
|
[
"VisIt"
] |
0c7f1bf1aaed40e48854cf4df49623dc551e43f48cf22cf87cd731c8a45e1be5
|
#!/usr/bin/python
#coding=utf8
##----------------------------------------------------------------------------##
## █ █ ##
## ████████ ##
## ██ ██ ##
## ███ █ █ ███ guardchecker.py ##
## █ █ █ █ GuardChecker ##
## ████████████ ##
## █ █ Copyright (c) 2015, 2016 ##
## █ █ █ █ AmazingCow - www.AmazingCow.com ##
## █ █ █ █ ##
## █ █ N2OMatt - n2omatt@amazingcow.com ##
## ████████████ www.amazingcow.com/n2omatt ##
## ##
## This software is licensed as GPLv3 ##
## CHECK THE COPYING FILE TO MORE DETAILS ##
## ##
## Permission is granted to anyone to use this software for any purpose, ##
## including commercial applications, and to alter it and redistribute it ##
## freely, subject to the following restrictions: ##
## ##
## 0. You **CANNOT** change the type of the license. ##
## 1. The origin of this software must not be misrepresented; ##
## you must not claim that you wrote the original software. ##
## 2. If you use this software in a product, an acknowledgment in the ##
## product IS HIGHLY APPRECIATED, both in source and binary forms. ##
## (See opensource.AmazingCow.com/acknowledgment.html for details). ##
## If you will not acknowledge, just send us a email. We'll be ##
## *VERY* happy to see our work being used by other people. :) ##
## The email is: acknowledgment_opensource@AmazingCow.com ##
## 3. Altered source versions must be plainly marked as such, ##
## and must not be misrepresented as being the original software. ##
## 4. This notice may not be removed or altered from any source ##
## distribution. ##
## 5. Most important, you must have fun. ;) ##
## ##
## Visit opensource.amazingcow.com for more open-source projects. ##
## ##
## Enjoy :) ##
##----------------------------------------------------------------------------##
## Imports ##
import getopt;
import os.path;
import os;
import re;
import sys;
################################################################################
## Don't let the standard import error to users - Instead show a ##
## 'nice' error screen describing the error and how to fix it. ##
################################################################################
def __import_error_message_print(pkg_name, pkg_url):
print "Sorry, "
print "guardchecker depends on {} package.".format(pkg_name);
print "Visit {} to get it.".format(pkg_url);
print "Or checkout the README.md to learn other ways to install {}.".format(pkg_name);
exit(1);
## cowtermcolor ##
try:
import cowtermcolor;
from cowtermcolor import *;
except ImportError, e:
__import_error_message_print(
"cowtermcolor",
"http//opensource.amazingcow.com/cowtermcolor.html");
################################################################################
## Globals ##
################################################################################
class Globals:
file_exts = [];
backup_path = None;
exclude_paths = [];
project_root = None;
project_name = None;
opt_interactive = False;
opt_force = False;
opt_dry_run = False;
################################################################################
## Constants ##
################################################################################
class Constants:
FLAG_HELP = "h", "help";
FLAG_VERSION = "v", "version";
FLAG_INTERACTIVE = "i", "interactive";
FLAG_FORCE = "f", "force";
FLAG_PROJECT_NAME = "n", "project-name";
FLAG_EXT = "E", "ext";
FLAG_BACKUP_PATH = "b", "backup-path";
FLAG_EXCLUDE_PATHS = "e", "exclude-path"
FLAG_DRY_RUN = "D", "dry-run";
ALL_FLAGS_SHORT = "".join([
FLAG_HELP [0],
FLAG_VERSION [0],
FLAG_INTERACTIVE [0],
FLAG_FORCE [0],
FLAG_PROJECT_NAME [0] + ":",
FLAG_EXT [0] + ":",
FLAG_BACKUP_PATH [0] + ":",
FLAG_EXCLUDE_PATHS [0] + ":",
FLAG_DRY_RUN [0],
]);
ALL_FLAGS_LONG = [
FLAG_HELP [1],
FLAG_VERSION [1],
FLAG_INTERACTIVE [1],
FLAG_FORCE [1],
FLAG_PROJECT_NAME [1] + "=",
FLAG_EXT [1] + "=",
FLAG_BACKUP_PATH [1] + "=",
FLAG_EXCLUDE_PATHS [1] + "=",
FLAG_DRY_RUN [1],
];
DEFAULT_BACKUP_PATH = "/tmp/guardchecker";
DEFAULT_EXT_HEADER = [".h"];
DEFAULT_PROJECT_ROOT = "./";
#App
APP_NAME = "guardchecker";
APP_VERSION = "0.3.1";
APP_AUTHOR = "N2OMatt <n2omatt@amazingcow.com>"
APP_COPYRIGHT = "\n".join(("Copyright (c) 2015, 2016 - Amazing Cow",
"This is a free software (GPLv3) - Share/Hack it",
"Check opensource.amazingcow.com for more :)"));
################################################################################
## Colors ##
################################################################################
ColorError = Color(RED);
ColorWarning = Color(YELLOW);
ColorOK = Color(GREEN);
ColorPath = Color(MAGENTA);
ColorInfoMsg = Color(BLUE);
################################################################################
## Helper Functions ##
################################################################################
def print_help():
help = """Usage:
guardchecker [-h | -v]
guardchecker [-i | -f] [-D]
[-n <project-name>]
[-E <ext>]
[-e <path>]
[-b <path>]
<project-root>
Options:
*-h --help : Show this screen.
*-v --version : Show app version and copyright.
    -i --interactive : Runs in interactive mode (Asks before making a change).
-f --force : Don't prompt anything... (Overridden by -i).
-n --project-name : Set the Project Name (First part of include guard).
-E --ext <ext> : Add a file extension to search (Must include the dot).
-b --backup-path <path> : Where the original files will be backed up.
-e --exclude-path <path> : The path (and all its children) is skipped.
-D --dry-run : No modifications will actually be made.
Notes:
If <project-root> is blank the current dir is assumed.
If --project-name is not set, the Project Name defaults to
the last component of <project-root>.
Multiple --ext <ext> can be used.
Multiple --exclude-path <path> can be used.
Options marked with * are exclusive, i.e. guardchecker will run only that
operation and exit successfully afterwards.
""";
print help;
exit(0);
def print_version():
print "{} - {} - {}".format(Constants.APP_NAME,
Constants.APP_VERSION,
Constants.APP_AUTHOR);
print Constants.APP_COPYRIGHT;
print;
exit(0);
def print_run_warning():
msg = """{color}WARNING:
THIS IS A VERY, VERY DANGEROUS PROGRAM. IT WILL MESS WITH YOUR SOURCES.
THE PROGRAM WILL MAKE A BACKUP AT ({path}{color}) BUT IT IS STRONGLY ADVISED
THAT YOU CREATE A HANDMADE BACKUP FIRST AND PASS A CUSTOM BACKUP PATH.
CURRENTLY IT IS VERY DUMB AT SEEKING THE INCLUDE GUARDS, SO IT IS VERY WISE
TO RUN IT IN INTERACTIVE MODE ({flag}{color}) TO CHECK THE CHANGES
BEFORE THEY HAPPEN.
RUN THIS AT YOUR OWN RISK; IT WORKS PRETTY WELL IF USED WITH CARE.
ENJOY...{reset}"""
warning = msg.format(color=ColorWarning(auto_reset=False),
path =ColorPath(Constants.DEFAULT_BACKUP_PATH),
flag =ColorInfoMsg("-i | --interactive"),
reset=ColorWarning(auto_reset=True));
print warning;
def print_run_info():
print "Run Options";
print " Interactive :", Globals.opt_interactive;
print " Dry Run :", Globals.opt_dry_run;
print " Backup path :", Globals.backup_path;
print " File exts :", " ".join(Globals.file_exts);
print " Project root :", Globals.project_root;
print " Project name :", Globals.project_name;
print " Exclude Paths :", Globals.exclude_paths;
def should_correct_guard_prompt():
try:
r = raw_input("Correct the guard? [Y/n]:");
if(len(r) != 0 and r.lower() == "n"):
return False;
return True;
except KeyboardInterrupt, e:
print ColorWarning("\nCanceling");
exit(0);
def should_continue_run_prompt():
try:
r = raw_input("Run the program? [y/N]:");
if(len(r) != 0 and r.lower() == "y"):
return True;
return False;
except KeyboardInterrupt, e:
print ColorWarning("\nCanceling");
exit(0);
def system_cmd(cmd):
ret = os.system(cmd);
if(ret != 0):
print_fatal("cmd: {}".format(cmd));
def expand_path(path):
return os.path.abspath(os.path.expanduser(path));
def normalize_path(path):
return os.path.normpath(expand_path(path));
def print_fatal(msg):
print ColorError("[FATAL]"), msg;
exit(1);
################################################################################
## Guard Related Functions ##
################################################################################
def fix_guard(fullpath, incorrect, correct):
#Create the temp directory.
base_path = os.path.dirname(fullpath);
current_backup_path = os.path.join(Globals.backup_path, base_path);
current_backup_path = normalize_path(current_backup_path);
mkdir_cmd = "mkdir -p {}".format(current_backup_path);
system_cmd(mkdir_cmd);
#Replace the incorrect guard with correct one.
#This operation will create a "temporary" file that
#will become the "new correct" file after we copy the original
#file to the backup folder.
temp_file_path = fullpath + "_TEMP";
sed_cmd = "sed s/\"{incorrect_guard}\"/\"{correct_guard}\"/g "
sed_cmd += "\"{original_file}\" > \"{temporary_file}\"";
sed_cmd = sed_cmd.format(incorrect_guard=incorrect,
correct_guard=correct,
original_file=fullpath,
temporary_file=temp_file_path);
system_cmd(sed_cmd);
#Now move the "original" file to the backup folder
#and rename the temporary file as the "original".
mv_original_cmd = "mv {} {}".format(fullpath, current_backup_path);
mv_temp_cmd = "mv {} {}".format(temp_file_path, fullpath);
system_cmd(mv_original_cmd);
system_cmd(mv_temp_cmd);
backup_fullpath = os.path.join(current_backup_path,
os.path.basename(fullpath));
return backup_fullpath;
def build_correct_guard(fullpath):
path = os.path.normpath(os.path.join(Globals.project_name, fullpath));
return "__{}__".format(path.replace("/", "_").replace(".", "_"));
def check_file(root, filename):
#Make the fullpath for file and open and read all lines.
fullpath = os.path.join(root, filename);
file_lines = open(fullpath).readlines();
#Search entire file for a line with guard.
for file_line in file_lines:
#COWTODO: Find a better way to check if we're dealing with an include guard.
search_str = "^{}.*".format("#ifndef");
#Check if we have an include guard.
if(re.search(search_str, file_line) is None):
continue;
#Clean the line to let us compare.
#Note: str.lstrip() strips a *set* of characters rather than a prefix,
#so remove the "#ifndef" prefix explicitly before comparing.
current_guard = file_line.replace("\n", "").replace("#ifndef", "", 1).strip();
correct_guard = build_correct_guard(fullpath);
#Check if the guards match.
if(correct_guard == current_guard):
print ColorOK("[OK]"), fullpath;
break;
#Guards don't match...
print ColorWarning("[NOT MATCH]"), fullpath;
print " Expected :", ColorOK(correct_guard);
print " Found :", ColorError(current_guard);
if(Globals.opt_dry_run):
print ColorInfoMsg("[DRY RUN]");
return;
#If running in non interactive mode, or user asks to correct the guard.
if(not Globals.opt_interactive or should_correct_guard_prompt()):
back_path = fix_guard(fullpath, current_guard, correct_guard);
print " Backup :", ColorPath(back_path);
def scan():
#Change the current working directory to the directory
#of project root. We do this because it eases **all** other operations.
os.chdir(Globals.project_root);
## Scan the directories.
for root, dirs, files in os.walk(".", topdown=True):
if(expand_path(root) in Globals.exclude_paths):
print ColorInfoMsg("[SKIPPING]:"), ColorPath(root);
dirs [:] = [];
files[:] = [];
continue;
for file in files:
filename, fileext = os.path.splitext(file);
if(fileext in Globals.file_exts):
check_file(root, file);
################################################################################
## Script Initialization ##
################################################################################
def main():
#Get the command line options.
try:
options = getopt.gnu_getopt(sys.argv[1:],
Constants.ALL_FLAGS_SHORT,
Constants.ALL_FLAGS_LONG);
except Exception, e:
print_fatal(str(e));
#Options switches.
help_requested    = False;
version_requested = False;
#Parse the options.
for option in options[0]:
key, value = option;
key = key.lstrip("-");
#Check if flags are present.
if  (key in Constants.FLAG_HELP         ): help_requested    = True;
elif(key in Constants.FLAG_VERSION ): version_requested = True;
elif(key in Constants.FLAG_INTERACTIVE ): Globals.opt_interactive = True;
elif(key in Constants.FLAG_FORCE ): Globals.opt_force = True;
elif(key in Constants.FLAG_DRY_RUN ): Globals.opt_dry_run = True;
elif(key in Constants.FLAG_BACKUP_PATH ): Globals.backup_path = value;
elif(key in Constants.FLAG_PROJECT_NAME ): Globals.project_name = value;
elif(key in Constants.FLAG_EXT ): Globals.file_exts.append(value);
elif(key in Constants.FLAG_EXCLUDE_PATHS): Globals.exclude_paths.append(value);
#Check if the exclusive operations are requested.
if(help_requested   ): print_help();
if(version_requested): print_version();
#Check if user passed the project root.
if(len(options[1]) != 0):
Globals.project_root = options[1][0];
#Check if user passed custom info, if not set the defaults.
#Backup path.
if(Globals.backup_path is None or len(Globals.backup_path) == 0):
Globals.backup_path = Constants.DEFAULT_BACKUP_PATH;
#File extensions.
if(len(Globals.file_exts) == 0):
Globals.file_exts = Constants.DEFAULT_EXT_HEADER;
#Project Root.
if(Globals.project_root is None or len(Globals.project_root) == 0):
Globals.project_root = Constants.DEFAULT_PROJECT_ROOT;
#Project Name.
if(Globals.project_name is None or len(Globals.project_name) == 0):
Globals.project_name = os.path.basename(expand_path(Globals.project_root));
#Set the backup path
Globals.backup_path = os.path.join(Globals.backup_path,
Globals.project_name + "_BACKUP");
#Interactive flag ALWAYS overrides the force flag.
if(Globals.opt_interactive == True):
Globals.opt_force = False;
#Print the program start up info.
if(Globals.opt_force == False):
print_run_warning();
print_run_info();
if(not should_continue_run_prompt()):
print ColorWarning("Aborting...");
exit(0);
#Expand all excluded paths.
Globals.exclude_paths = map(expand_path, Globals.exclude_paths);
# Start...
scan();
if(__name__ == "__main__"):
main();
|
AmazingCow/CppGuardChecker
|
guardchecker.py
|
Python
|
gpl-3.0
| 18,094
|
[
"VisIt"
] |
149c85269e017aac194ac46af6d5db9e6d4f6abea2c662f9864c359306daea90
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
import time
m = 2000
n = 4000
testMehrotra = True
testIPF = False
manualInit = False
display = False
progress = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Make a sparse matrix with the last column dense
def Rectang(height,width):
A = El.DistSparseMatrix()
A.Resize(height,width)
localHeight = A.LocalHeight()
A.Reserve(5*localHeight)
for sLoc in xrange(localHeight):
s = A.GlobalRow(sLoc)
if s < width:
A.QueueUpdate( s, s, 11 )
if s >= 1 and s-1 < width:
A.QueueUpdate( s, s-1, -1 )
if s+1 < width:
A.QueueUpdate( s, s+1, 2 )
if s >= height and s-height < width:
A.QueueUpdate( s, s-height, -3 )
if s+height < width:
A.QueueUpdate( s, s+height, 4 )
# The dense last column
A.QueueUpdate( s, width-1, -5./height ) # float division; in Python 2, -5/height floors to -1
A.ProcessQueues()
return A
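# For reference, each row s of A receives at most five stencil entries plus
# the dense last column (subject to the bounds checks above):
#   A(s,s)=11, A(s,s-1)=-1, A(s,s+1)=2, A(s,s-height)=-3, A(s,s+height)=4,
#   and A(s,width-1)=-5/height.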
A = Rectang(m,n)
# Generate a b which implies a primal feasible x
# ==============================================
xGen = El.DistMultiVec()
El.Uniform(xGen,n,1,0.5,0.5)
b = El.DistMultiVec()
El.Zeros( b, m, 1 )
El.SparseMultiply( El.NORMAL, 1., A, xGen, 0., b )
# Generate a c which implies a dual feasible (y,z)
# ================================================
yGen = El.DistMultiVec()
El.Gaussian(yGen,m,1)
c = El.DistMultiVec()
El.Uniform(c,n,1,0.5,0.5)
El.SparseMultiply( El.TRANSPOSE, -1., A, yGen, 1., c )
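# For reference (assuming the usual GEMV-style convention for
# El.SparseMultiply(orient, alpha, A, x, beta, y), i.e. y := alpha*op(A)*x + beta*y):
# above, b := A*xGen, which makes xGen primal feasible, and c := c - A^T*yGen.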
if display:
El.Display( A, "A" )
El.Display( b, "b" )
El.Display( c, "c" )
# Set up the control structure (and possibly initial guesses)
# ===========================================================
ctrl = El.LPDirectCtrl_d(isSparse=True)
xOrig = El.DistMultiVec()
yOrig = El.DistMultiVec()
zOrig = El.DistMultiVec()
if manualInit:
El.Uniform(xOrig,n,1,0.5,0.4999)
El.Uniform(yOrig,m,1,0.5,0.4999)
El.Uniform(zOrig,n,1,0.5,0.4999)
x = El.DistMultiVec()
y = El.DistMultiVec()
z = El.DistMultiVec()
if testMehrotra:
ctrl.approach = El.LP_MEHROTRA
ctrl.mehrotraCtrl.primalInit = manualInit
ctrl.mehrotraCtrl.dualInit = manualInit
ctrl.mehrotraCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
startMehrotra = time.clock()
El.LPDirect(A,b,c,x,y,z,ctrl)
endMehrotra = time.clock()
if worldRank == 0:
print "Mehrotra time:", endMehrotra-startMehrotra
if display:
El.Display( x, "x Mehrotra" )
El.Display( y, "y Mehrotra" )
El.Display( z, "z Mehrotra" )
obj = El.Dot(c,x)
if worldRank == 0:
print "Mehrotra c^T x =", obj
if testIPF:
ctrl.approach = El.LP_IPF
ctrl.ipfCtrl.primalInit = manualInit
ctrl.ipfCtrl.dualInit = manualInit
ctrl.ipfCtrl.progress = progress
ctrl.ipfCtrl.lineSearchCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
startIPF = time.clock()
El.LPDirect(A,b,c,x,y,z,ctrl)
endIPF = time.clock()
if worldRank == 0:
print "IPF time:", endIPF-startIPF
if display:
El.Display( x, "x IPF" )
El.Display( y, "y IPF" )
El.Display( z, "z IPF" )
obj = El.Dot(c,x)
if worldRank == 0:
print "IPF c^T x =", obj
# Require the user to press Enter before exiting (so any displayed figures stay open)
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
|
justusc/Elemental
|
examples/interface/LPDirect.py
|
Python
|
bsd-3-clause
| 3,491
|
[
"Gaussian"
] |
6b415323a8931b165c79b5d52d1abe4db0082ec7d5f7057ed272329e74eb8c13
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import ldap
import re
import sys
import urllib
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_equal, assert_false, assert_not_equal
import desktop.conf
from desktop.lib.django_test_util import make_logged_in_client
from django.contrib.auth.models import User, Group
from django.utils.encoding import smart_unicode
from django.core.urlresolvers import reverse
from django.test.client import Client
from useradmin.models import HuePermission, GroupPermission, UserProfile
from useradmin.models import get_profile, get_default_user_group
import useradmin.conf
import useradmin.ldap_access
from hadoop import pseudo_hdfs4
from useradmin.password_policy import reset_password_policy
def reset_all_users():
"""Reset to a clean state by deleting all users"""
for user in User.objects.all():
user.delete()
def reset_all_groups():
"""Reset to a clean state by deleting all groups"""
useradmin.conf.DEFAULT_USER_GROUP.set_for_testing(None)
for grp in Group.objects.all():
grp.delete()
class LdapTestConnection(object):
"""
Test class which mimics the behaviour of LdapConnection (from ldap_access.py).
It also includes functionality to fake modifications to an LDAP server. It is designed
as a singleton, to allow for changes to persist across discrete connections.
This class assumes uid is the user_name_attr.
"""
def __init__(self):
self._instance = LdapTestConnection.Data()
def add_user_group_for_test(self, user, group):
self._instance.groups[group]['members'].append(user)
def remove_user_group_for_test(self, user, group):
self._instance.groups[group]['members'].remove(user)
def add_posix_user_group_for_test(self, user, group):
self._instance.groups[group]['posix_members'].append(user)
def remove_posix_user_group_for_test(self, user, group):
self._instance.groups[group]['posix_members'].remove(user)
def find_users(self, username_pattern, search_attr=None, user_name_attr=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
""" Returns info for a particular user via a case insensitive search """
if find_by_dn:
data = filter(lambda attrs: attrs['dn'] == username_pattern, self._instance.users.values())
else:
username_pattern = "^%s$" % username_pattern.replace('.','\\.').replace('*', '.*')
username_fsm = re.compile(username_pattern, flags=re.I)
usernames = filter(lambda username: username_fsm.match(username), self._instance.users.keys())
data = [self._instance.users.get(username) for username in usernames]
return data
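# Illustrative example: a wildcard pattern like "mo*" is rewritten to the
# regex "^mo.*$" and matched case-insensitively (flags=re.I), so it matches
# the fake user "moe"; "." in patterns is escaped and matched literally.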
def find_groups(self, groupname_pattern, search_attr=None, group_name_attr=None, group_member_attr=None, group_filter=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
""" Return all groups in the system with parents and children """
if find_by_dn:
data = filter(lambda attrs: attrs['dn'] == groupname_pattern, self._instance.groups.values())
# SCOPE_SUBTREE means we return all sub-entries of the desired entry along with the desired entry.
if data and scope == ldap.SCOPE_SUBTREE:
sub_data = filter(lambda attrs: attrs['dn'].endswith(data[0]['dn']), self._instance.groups.values())
data.extend(sub_data)
else:
groupname_pattern = "^%s$" % groupname_pattern.replace('.','\\.').replace('*', '.*')
groupnames = filter(lambda username: re.match(groupname_pattern, username), self._instance.groups.keys())
data = [self._instance.groups.get(groupname) for groupname in groupnames]
return data
def find_members_of_group(self, dn, search_attr, ldap_filter, scope=ldap.SCOPE_SUBTREE):
members = []
# Iterate over the group records (dict values), not the keys.
for group_info in self._instance.groups.values():
if group_info['dn'] == dn:
members.extend(group_info['members'])
members = set(members)
users = []
for user_info in self._instance.users.values():
if user_info['dn'] in members:
users.append(user_info)
groups = []
for group_info in self._instance.groups.values():
if group_info['dn'] in members:
groups.append(group_info)
return users + groups
def find_users_of_group(self, dn):
members = []
for group_info in self._instance.groups.values():
if group_info['dn'] == dn:
members.extend(group_info['members'])
members = set(members)
users = []
for user_info in self._instance.users.values():
if user_info['dn'] in members:
users.append(user_info)
return users
def find_groups_of_group(self, dn):
members = []
for group_info in self._instance.groups.values():
if group_info['dn'] == dn:
members.extend(group_info['members'])
groups = []
for group_info in self._instance.groups.values():
if group_info['dn'] in members:
groups.append(group_info)
return groups
class Data:
def __init__(self):
self.users = {'moe': {'dn': 'uid=moe,ou=People,dc=example,dc=com', 'username':'moe', 'first':'Moe', 'email':'moe@stooges.com', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com']},
'lårry': {'dn': 'uid=lårry,ou=People,dc=example,dc=com', 'username':'lårry', 'first':'Larry', 'last':'Stooge', 'email':'larry@stooges.com', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com', 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
'curly': {'dn': 'uid=curly,ou=People,dc=example,dc=com', 'username':'curly', 'first':'Curly', 'last':'Stooge', 'email':'curly@stooges.com', 'groups': ['cn=TestUsers,ou=Groups,dc=example,dc=com', 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
'Rock': {'dn': 'uid=Rock,ou=People,dc=example,dc=com', 'username':'Rock', 'first':'rock', 'last':'man', 'email':'rockman@stooges.com', 'groups': ['cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com']},
'nestedguy': {'dn': 'uid=nestedguy,ou=People,dc=example,dc=com', 'username':'nestedguy', 'first':'nested', 'last':'guy', 'email':'nestedguy@stooges.com', 'groups': ['cn=NestedGroup,ou=Groups,dc=example,dc=com']},
'otherguy': {'dn': 'uid=otherguy,ou=People,dc=example,dc=com', 'username':'otherguy', 'first':'Other', 'last':'Guy', 'email':'other@guy.com'},
'posix_person': {'dn': 'uid=posix_person,ou=People,dc=example,dc=com', 'username': 'posix_person', 'first': 'pos', 'last': 'ix', 'email': 'pos@ix.com'},
'posix_person2': {'dn': 'uid=posix_person2,ou=People,dc=example,dc=com', 'username': 'posix_person2', 'first': 'pos', 'last': 'ix', 'email': 'pos@ix.com'},
'user with space': {'dn': 'uid=user with space,ou=People,dc=example,dc=com', 'username': 'user with space', 'first': 'user', 'last': 'space', 'email': 'user@space.com'},
'spaceless': {'dn': 'uid=user without space,ou=People,dc=example,dc=com', 'username': 'spaceless', 'first': 'user', 'last': 'space', 'email': 'user@space.com'},
'test_toolongusernametoolongusername': {'dn': 'uid=test_toolongusernametoolongusername,ou=People,dc=example,dc=com', 'username': 'test_toolongusernametoolongusername', 'first': 'toolong', 'last': 'username', 'email': 'toolong@username.com'},
'test_longfirstname': {'dn': 'uid=test_longfirstname,ou=People,dc=example,dc=com', 'username': 'test_longfirstname', 'first': 'test_longfirstname_test_longfirstname', 'last': 'username', 'email': 'toolong@username.com'},}
self.groups = {'TestUsers': {
'dn': 'cn=TestUsers,ou=Groups,dc=example,dc=com',
'name':'TestUsers',
'members':['uid=moe,ou=People,dc=example,dc=com','uid=lårry,ou=People,dc=example,dc=com','uid=curly,ou=People,dc=example,dc=com','uid=test_toolongusernametoolongusername,ou=People,dc=example,dc=com'],
'posix_members':[]},
'Test Administrators': {
'dn': 'cn=Test Administrators,cn=TestUsers,ou=Groups,dc=example,dc=com',
'name':'Test Administrators',
'members':['uid=Rock,ou=People,dc=example,dc=com','uid=lårry,ou=People,dc=example,dc=com','uid=curly,ou=People,dc=example,dc=com','uid=test_toolongusernametoolongusername,ou=People,dc=example,dc=com'],
'posix_members':[]},
'OtherGroup': {
'dn': 'cn=OtherGroup,cn=TestUsers,ou=Groups,dc=example,dc=com',
'name':'OtherGroup',
'members':[],
'posix_members':[]},
'NestedGroups': {
'dn': 'cn=NestedGroups,ou=Groups,dc=example,dc=com',
'name':'NestedGroups',
'members':['cn=NestedGroup,ou=Groups,dc=example,dc=com'],
'posix_members':[]
},
'NestedGroup': {
'dn': 'cn=NestedGroup,ou=Groups,dc=example,dc=com',
'name':'NestedGroup',
'members':['uid=nestedguy,ou=People,dc=example,dc=com'],
'posix_members':[]
},
'NestedPosixGroups': {
'dn': 'cn=NestedPosixGroups,ou=Groups,dc=example,dc=com',
'name':'NestedPosixGroups',
'members':['cn=PosixGroup,ou=Groups,dc=example,dc=com'],
'posix_members':[]
},
'PosixGroup': {
'dn': 'cn=PosixGroup,ou=Groups,dc=example,dc=com',
'name':'PosixGroup',
'members':[],
'posix_members':['posix_person','lårry']},
'PosixGroup1': {
'dn': 'cn=PosixGroup1,cn=PosixGroup,ou=Groups,dc=example,dc=com',
'name':'PosixGroup1',
'members':[],
'posix_members':['posix_person2']},
}
def test_invalid_username():
BAD_NAMES = ('-foo', 'foo:o', 'foo o', ' foo')
c = make_logged_in_client(username="test", is_superuser=True)
for bad_name in BAD_NAMES:
assert_true(c.get('/useradmin/users/new'))
response = c.post('/useradmin/users/new', dict(username=bad_name, password1="test", password2="test"))
assert_true('not allowed' in response.context["form"].errors['username'][0])
class BaseUserAdminTests(object):
@classmethod
def setUpClass(cls):
cls._class_resets = [
useradmin.conf.DEFAULT_USER_GROUP.set_for_testing(None),
]
@classmethod
def tearDownClass(cls):
for reset in cls._class_resets:
reset()
def setUp(self):
reset_all_users()
reset_all_groups()
def tearDown(self):
pass
class TestUserAdmin(BaseUserAdminTests):
def test_group_permissions(self):
# Get ourselves set up with a user and a group
c = make_logged_in_client(username="test", is_superuser=True)
Group.objects.create(name="test-group")
test_user = User.objects.get(username="test")
test_user.groups.add(Group.objects.get(name="test-group"))
test_user.save()
# Make sure that a superuser can always access applications
response = c.get('/useradmin/users')
assert_true('Hue Users' in response.content)
assert_true(len(GroupPermission.objects.all()) == 0)
c.post('/useradmin/groups/edit/test-group',
dict(name="test-group",
members=[User.objects.get(username="test").pk],
permissions=[HuePermission.objects.get(app='useradmin',action='access').pk],
save="Save"), follow=True)
assert_true(len(GroupPermission.objects.all()) == 1)
# Now test that we have limited access
c1 = make_logged_in_client(username="nonadmin", is_superuser=False)
response = c1.get('/useradmin/users')
assert_true('You do not have permission to access the Useradmin application.' in response.content)
# Add the non-admin to a group that should grant permissions to the app
test_user = User.objects.get(username="nonadmin")
test_user.groups.add(Group.objects.get(name='test-group'))
test_user.save()
# Check that we have access now
response = c1.get('/useradmin/users')
assert_true(get_profile(test_user).has_hue_permission('access','useradmin'))
assert_true('Hue Users' in response.content)
# Make sure we can't modify permissions
response = c1.get('/useradmin/permissions/edit/useradmin/access')
assert_true('must be a superuser to change permissions' in response.content)
# And revoke access from the group
c.post('/useradmin/permissions/edit/useradmin/access',
dict(app='useradmin',
priv='access',
groups=[],
save="Save"), follow=True)
assert_true(len(GroupPermission.objects.all()) == 0)
assert_false(get_profile(test_user).has_hue_permission('access','useradmin'))
# We should no longer have access to the app
response = c1.get('/useradmin/users')
assert_true('You do not have permission to access the Useradmin application.' in response.content)
def test_default_group(self):
resets = [
useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('test_default')
]
try:
get_default_user_group()
c = make_logged_in_client(username='test', is_superuser=True)
# Create default group if it doesn't already exist.
assert_true(Group.objects.filter(name='test_default').exists())
# Try deleting the default group
assert_true(Group.objects.filter(name='test_default').exists())
response = c.post('/useradmin/groups/delete', {'group_names': ['test_default']})
assert_true('default user group may not be deleted' in response.content)
assert_true(Group.objects.filter(name='test_default').exists())
# Change the name of the default group, and try deleting again
resets.append(useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('new_default'))
response = c.post('/useradmin/groups/delete' , {'group_names': ['test_default']})
assert_false(Group.objects.filter(name='test_default').exists())
assert_true(Group.objects.filter(name='new_default').exists())
finally:
for reset in resets:
reset()
def test_get_profile(self):
# Ensure profiles are created after get_profile is called.
c = make_logged_in_client(username='test', password='test', is_superuser=True)
assert_equal(0, UserProfile.objects.count())
p = get_profile(User.objects.get(username='test'))
assert_equal(1, UserProfile.objects.count())
def test_group_admin(self):
c = make_logged_in_client(username="test", is_superuser=True)
response = c.get('/useradmin/groups')
# No groups just yet
assert_true(len(response.context["groups"]) == 0)
assert_true("Hue Groups" in response.content)
# Create a group
response = c.get('/useradmin/groups/new')
assert_equal('/useradmin/groups/new', response.context['action'])
c.post('/useradmin/groups/new', dict(name="testgroup"))
# We should have an empty group in the DB now
assert_true(len(Group.objects.all()) == 1)
assert_true(Group.objects.filter(name="testgroup").exists())
assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 0)
# And now, just for kicks, let's try adding a user
response = c.post('/useradmin/groups/edit/testgroup',
dict(name="testgroup",
members=[User.objects.get(username="test").pk],
save="Save"), follow=True)
assert_true(len(Group.objects.get(name="testgroup").user_set.all()) == 1)
assert_true(Group.objects.get(name="testgroup").user_set.filter(username="test").exists())
# Test some permissions
c2 = make_logged_in_client(username="nonadmin", is_superuser=False)
# Need to give access to the user for the rest of the test
group = Group.objects.create(name="access-group")
perm = HuePermission.objects.get(app='useradmin', action='access')
GroupPermission.objects.create(group=group, hue_permission=perm)
test_user = User.objects.get(username="nonadmin")
test_user.groups.add(Group.objects.get(name="access-group"))
test_user.save()
# Make sure non-superusers can't do bad things
response = c2.get('/useradmin/groups/new')
assert_true("You must be a superuser" in response.content)
response = c2.get('/useradmin/groups/edit/testgroup')
assert_true("You must be a superuser" in response.content)
response = c2.post('/useradmin/groups/new', dict(name="nonsuperuser"))
assert_true("You must be a superuser" in response.content)
response = c2.post('/useradmin/groups/edit/testgroup',
dict(name="nonsuperuser",
members=[User.objects.get(username="test").pk],
save="Save"), follow=True)
assert_true("You must be a superuser" in response.content)
# Should be one group left, because we created the other group
response = c.post('/useradmin/groups/delete', {'group_names': ['testgroup']})
assert_true(len(Group.objects.all()) == 1)
group_count = len(Group.objects.all())
response = c.post('/useradmin/groups/new', dict(name="with space"))
assert_equal(len(Group.objects.all()), group_count + 1)
def test_user_admin_password_policy(self):
# Set up password policy
password_hint = password_error_msg = ("The password must be at least 8 characters long, "
"and must contain both uppercase and lowercase letters, "
"at least one number, and at least one special character.")
password_rule = "^(?=.*?[A-Z])(?=(.*[a-z]){1,})(?=(.*[\d]){1,})(?=(.*[\W_]){1,}).{8,}$"
resets = [
useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(True),
useradmin.conf.PASSWORD_POLICY.PWD_RULE.set_for_testing(password_rule),
useradmin.conf.PASSWORD_POLICY.PWD_HINT.set_for_testing(password_hint),
useradmin.conf.PASSWORD_POLICY.PWD_ERROR_MESSAGE.set_for_testing(password_error_msg),
]
try:
reset_password_policy()
# Test first-ever login with password policy enabled
c = Client()
response = c.get('/accounts/login/')
assert_equal(200, response.status_code)
assert_true(response.context['first_login_ever'])
response = c.post('/accounts/login/', dict(username="test_first_login", password="foo"))
assert_true(response.context['first_login_ever'])
assert_equal([password_error_msg], response.context["form"]["password"].errors)
response = c.post('/accounts/login/', dict(username="test_first_login", password="foobarTest1["), follow=True)
assert_equal(200, response.status_code)
assert_true(User.objects.get(username="test_first_login").is_superuser)
assert_true(User.objects.get(username="test_first_login").check_password("foobarTest1["))
c.get('/accounts/logout')
# Test changing a user's password
c = make_logged_in_client('superuser', is_superuser=True)
# Test password hint is displayed
response = c.get('/useradmin/users/edit/superuser')
assert_true(password_hint in response.content)
# Password is less than 8 characters
response = c.post('/useradmin/users/edit/superuser',
dict(username="superuser",
is_superuser=True,
password1="foo",
password2="foo"))
assert_equal([password_error_msg], response.context["form"]["password1"].errors)
# Password is more than 8 characters long but does not have a special character
response = c.post('/useradmin/users/edit/superuser',
dict(username="superuser",
is_superuser=True,
password1="foobarTest1",
password2="foobarTest1"))
assert_equal([password_error_msg], response.context["form"]["password1"].errors)
# Password1 and Password2 are valid but they do not match
response = c.post('/useradmin/users/edit/superuser',
dict(username="superuser",
is_superuser=True,
password1="foobarTest1??",
password2="foobarTest1?",
password_old="foobarTest1[",
is_active=True))
assert_equal(["Passwords do not match."], response.context["form"]["password2"].errors)
# Password is valid now
c.post('/useradmin/users/edit/superuser',
dict(username="superuser",
is_superuser=True,
password1="foobarTest1[",
password2="foobarTest1[",
password_old="test",
is_active=True))
assert_true(User.objects.get(username="superuser").is_superuser)
assert_true(User.objects.get(username="superuser").check_password("foobarTest1["))
# Test creating a new user
response = c.get('/useradmin/users/new')
assert_true(password_hint in response.content)
# Password is less than 8 characters long
response = c.post('/useradmin/users/new',
dict(username="test_user",
is_superuser=False,
password1="foo",
password2="foo"))
assert_equal({'password1': [password_error_msg], 'password2': [password_error_msg]},
response.context["form"].errors)
# Password is more than 8 characters long but does not have a special character
response = c.post('/useradmin/users/new',
dict(username="test_user",
is_superuser=False,
password1="foobarTest1",
password2="foobarTest1"))
assert_equal({'password1': [password_error_msg], 'password2': [password_error_msg]},
response.context["form"].errors)
# Password1 and Password2 are valid but they do not match
response = c.post('/useradmin/users/new',
dict(username="test_user",
is_superuser=False,
password1="foobarTest1[",
password2="foobarTest1?"))
assert_equal({'password2': ["Passwords do not match."]}, response.context["form"].errors)
# Password is valid now
c.post('/useradmin/users/new',
dict(username="test_user",
is_superuser=False,
password1="foobarTest1[",
password2="foobarTest1[", is_active=True))
assert_false(User.objects.get(username="test_user").is_superuser)
assert_true(User.objects.get(username="test_user").check_password("foobarTest1["))
finally:
for reset in resets:
reset()
def test_user_admin(self):
FUNNY_NAME = '~`!@#$%^&*()_-+={}[]|\;"<>?/,.'
FUNNY_NAME_QUOTED = urllib.quote(FUNNY_NAME)
resets = [
useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('test_default'),
useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(False),
]
try:
reset_password_policy()
c = make_logged_in_client('test', is_superuser=True)
user = User.objects.get(username='test')
# Test basic output.
response = c.get('/useradmin/')
assert_true(len(response.context["users"]) > 0)
assert_true("Hue Users" in response.content)
# Test editing a superuser
# Just check that this comes back
response = c.get('/useradmin/users/edit/test')
# Edit it, to add a first and last name
response = c.post('/useradmin/users/edit/test',
dict(username="test",
first_name=u"Inglés",
last_name=u"Español",
is_superuser="True",
is_active="True"),
follow=True)
assert_true("User information updated" in response.content,
"Notification should be displayed in: %s" % response.content)
# Edit it, can't change username
response = c.post('/useradmin/users/edit/test',
dict(username="test2",
first_name=u"Inglés",
last_name=u"Español",
is_superuser="True",
is_active="True"),
follow=True)
assert_true("You cannot change a username" in response.content)
# Now make sure that those were materialized
response = c.get('/useradmin/users/edit/test')
assert_equal(smart_unicode("Inglés"), response.context["form"].instance.first_name)
assert_true("Español" in response.content)
# Shouldn't be able to demote to non-superuser
response = c.post('/useradmin/users/edit/test', dict(username="test",
first_name=u"Inglés", last_name=u"Español",
is_superuser=False, is_active=True))
assert_true("You cannot remove" in response.content,
"Shouldn't be able to remove the last superuser")
# Shouldn't be able to delete oneself
response = c.post('/useradmin/users/delete', {u'user_ids': [user.id]})
assert_true("You cannot remove yourself" in response.content,
"Shouldn't be able to delete the last superuser")
# Let's try changing the password
response = c.post('/useradmin/users/edit/test', dict(username="test", first_name="Tom", last_name="Tester", is_superuser=True, password1="foo", password2="foobar"))
assert_equal(["Passwords do not match."], response.context["form"]["password2"].errors, "Should have complained about mismatched password")
# Old password not confirmed
response = c.post('/useradmin/users/edit/test', dict(username="test", first_name="Tom", last_name="Tester", password1="foo", password2="foo", is_active=True, is_superuser=True))
assert_equal(["The old password does not match the current password."], response.context["form"]["password_old"].errors, "Should have complained about old password")
# Good now
response = c.post('/useradmin/users/edit/test', dict(username="test", first_name="Tom", last_name="Tester", password1="foo", password2="foo", password_old="test", is_active=True, is_superuser=True))
assert_true(User.objects.get(username="test").is_superuser)
assert_true(User.objects.get(username="test").check_password("foo"))
# Change it back!
response = c.post('/useradmin/users/edit/test', dict(username="test", first_name="Tom", last_name="Tester", password1="test", password2="test", password_old="foo", is_active="True", is_superuser="True"))
assert_true(User.objects.get(username="test").check_password("test"))
assert_true(make_logged_in_client(username = "test", password = "test"), "Check that we can still login.")
# Check new user form for default group
group = get_default_user_group()
response = c.get('/useradmin/users/new')
assert_true(response)
assert_true(('<option value="%s" selected="selected">%s</option>' % (group.id, group.name)) in str(response))
# Create a new regular user (duplicate name)
response = c.post('/useradmin/users/new', dict(username="test", password1="test", password2="test"))
assert_equal({ 'username': ["User with this Username already exists."]}, response.context["form"].errors)
# Create a new regular user (for real)
response = c.post('/useradmin/users/new', dict(username=FUNNY_NAME,
password1="test",
password2="test",
is_active="True"))
response = c.get('/useradmin/')
assert_true(FUNNY_NAME_QUOTED in response.content)
assert_true(len(response.context["users"]) > 1)
assert_true("Hue Users" in response.content)
# Validate profile is created.
assert_true(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())
# Need to give access to the user for the rest of the test
group = Group.objects.create(name="test-group")
perm = HuePermission.objects.get(app='useradmin', action='access')
GroupPermission.objects.create(group=group, hue_permission=perm)
# Verify that we can modify user groups through the user admin pages
response = c.post('/useradmin/users/new', dict(username="group_member", password1="test", password2="test", groups=[group.pk]))
User.objects.get(username='group_member')
assert_true(User.objects.get(username='group_member').groups.filter(name='test-group').exists())
response = c.post('/useradmin/users/edit/group_member', dict(username="group_member", groups=[]))
assert_false(User.objects.get(username='group_member').groups.filter(name='test-group').exists())
# Check permissions by logging in as the new user
c_reg = make_logged_in_client(username=FUNNY_NAME, password="test")
test_user = User.objects.get(username=FUNNY_NAME)
test_user.groups.add(Group.objects.get(name="test-group"))
test_user.save()
# Regular user should be able to modify oneself
response = c_reg.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,),
dict(username = FUNNY_NAME,
first_name = "Hello",
is_active = True,
groups=[group.id for group in test_user.groups.all()]), follow=True)
assert_equal(response.status_code, 200)
response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,), follow=True)
assert_equal(response.status_code, 200)
assert_equal("Hello", response.context["form"].instance.first_name)
funny_user = User.objects.get(username=FUNNY_NAME)
# Can't edit other people.
response = c_reg.post("/useradmin/users/delete", {u'user_ids': [funny_user.id]})
assert_true("You must be a superuser" in response.content,
"Regular user can't edit other people")
# Revert to regular "test" user, that has superuser powers.
c_su = make_logged_in_client()
# Inactivate FUNNY_NAME
c_su.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,),
dict(username = FUNNY_NAME,
first_name = "Hello",
is_active = False))
# Now make sure FUNNY_NAME can't log back in
response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,))
assert_true(response.status_code == 302 and "login" in response["location"],
"Inactivated user gets redirected to login page")
# Delete that regular user
funny_profile = get_profile(test_user)
response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
assert_equal(302, response.status_code)
assert_false(User.objects.filter(username=FUNNY_NAME).exists())
assert_false(UserProfile.objects.filter(id=funny_profile.id).exists())
# Bulk delete users
u1 = User.objects.create(username='u1', password="u1")
u2 = User.objects.create(username='u2', password="u2")
assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 2)
response = c_su.post('/useradmin/users/delete', {u'user_ids': [u1.id, u2.id]})
assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 0)
# Make sure that user deletion works if the user has never performed a request.
funny_user = User.objects.create(username=FUNNY_NAME, password='test')
assert_true(User.objects.filter(username=FUNNY_NAME).exists())
assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())
response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
assert_equal(302, response.status_code)
assert_false(User.objects.filter(username=FUNNY_NAME).exists())
assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())
# You shouldn't be able to create a user without a password
response = c_su.post('/useradmin/users/new', dict(username="test"))
assert_true("You must specify a password when creating a new user." in response.content)
finally:
for reset in resets:
reset()
def test_list_for_autocomplete(self):
# Now the autocomplete has access to all the users and groups
c1 = make_logged_in_client('test_list_for_autocomplete', is_superuser=False, groupname='test_list_for_autocomplete')
c2_same_group = make_logged_in_client('test_list_for_autocomplete2', is_superuser=False, groupname='test_list_for_autocomplete')
c3_other_group = make_logged_in_client('test_list_for_autocomplete3', is_superuser=False, groupname='test_list_for_autocomplete_other_group')
# c1 is in the same group as c2
response = c1.get(reverse('useradmin.views.list_for_autocomplete'), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
content = json.loads(response.content)
users = [smart_unicode(user['username']) for user in content['users']]
groups = [smart_unicode(user['name']) for user in content['groups']]
assert_equal([u'test_list_for_autocomplete2', u'test_list_for_autocomplete3'], users)
assert_true(u'test_list_for_autocomplete' in groups, groups)
assert_true(u'test_list_for_autocomplete_other_group' in groups, groups)
# c2 is in the same group as c1
response = c2_same_group.get(reverse('useradmin.views.list_for_autocomplete'), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
content = json.loads(response.content)
users = [smart_unicode(user['username']) for user in content['users']]
groups = [smart_unicode(user['name']) for user in content['groups']]
assert_equal([u'test_list_for_autocomplete', u'test_list_for_autocomplete3'], users)
assert_true(u'test_list_for_autocomplete' in groups, groups)
assert_true(u'test_list_for_autocomplete_other_group' in groups, groups)
# c3 is alone except for groups
response = c3_other_group.get(reverse('useradmin.views.list_for_autocomplete'), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
content = json.loads(response.content)
users = [smart_unicode(user['username']) for user in content['users']]
groups = [smart_unicode(user['name']) for user in content['groups']]
assert_equal([u'test_list_for_autocomplete', u'test_list_for_autocomplete2'], users)
assert_true(u'test_list_for_autocomplete' in groups, groups)
assert_true(u'test_list_for_autocomplete_other_group' in groups, groups)
class TestUserAdminWithHadoop(BaseUserAdminTests):
requires_hadoop = True
def test_ensure_home_directory(self):
raise SkipTest
resets = [
useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(False),
]
try:
reset_password_policy()
# Cluster and client for home directory creation
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True, groupname='test1')
cluster.fs.setuser(cluster.superuser)
# Create a user with a home directory
assert_false(cluster.fs.exists('/user/test1'))
response = c.post('/useradmin/users/new', dict(username="test1", password1='test', password2='test', ensure_home_directory=True))
assert_true(cluster.fs.exists('/user/test1'))
dir_stat = cluster.fs.stats('/user/test1')
assert_equal('test1', dir_stat.user)
assert_equal('test1', dir_stat.group)
assert_equal('40755', '%o' % dir_stat.mode)
# Create a user, then add their home directory
assert_false(cluster.fs.exists('/user/test2'))
response = c.post('/useradmin/users/new', dict(username="test2", password1='test', password2='test'))
assert_false(cluster.fs.exists('/user/test2'))
response = c.post('/useradmin/users/edit/%s' % "test2", dict(username="test2", password1='test', password2='test', password_old="test", ensure_home_directory=True))
assert_true(cluster.fs.exists('/user/test2'))
dir_stat = cluster.fs.stats('/user/test2')
assert_equal('test2', dir_stat.user)
assert_equal('test2', dir_stat.group)
assert_equal('40755', '%o' % dir_stat.mode)
finally:
for reset in resets:
reset()
class MockLdapConnection(object):
def __init__(self, ldap_config, ldap_url, username, password, ldap_cert):
self.ldap_config = ldap_config
self.ldap_url = ldap_url
self.username = username
self.password = password
self.ldap_cert = ldap_cert
def test_get_connection_bind_password():
# Unfortunately our tests leak a cached test ldap connection across functions, so we need to clear it out.
useradmin.ldap_access.CACHED_LDAP_CONN = None
# Monkey patch the LdapConnection class as we don't want to make a real connection.
OriginalLdapConnection = useradmin.ldap_access.LdapConnection
reset = [
desktop.conf.LDAP.LDAP_URL.set_for_testing('default.example.com'),
desktop.conf.LDAP.BIND_PASSWORD.set_for_testing('default-password'),
desktop.conf.LDAP.LDAP_SERVERS.set_for_testing({
'test': {
'ldap_url': 'test.example.com',
'bind_password': 'test-password',
}
})
]
try:
useradmin.ldap_access.LdapConnection = MockLdapConnection
connection = useradmin.ldap_access.get_connection_from_server()
assert_equal(connection.password, 'default-password')
connection = useradmin.ldap_access.get_connection_from_server('test')
assert_equal(connection.password, 'test-password')
finally:
useradmin.ldap_access.LdapConnection = OriginalLdapConnection
for f in reset:
f()
def test_get_connection_bind_password_script():
# Unfortunately our tests leak a cached test ldap connection across functions, so we need to clear it out.
useradmin.ldap_access.CACHED_LDAP_CONN = None
SCRIPT = '%s -c "print \'\\n password from script \\n\'"' % sys.executable
# Monkey patch the LdapConnection class as we don't want to make a real connection.
OriginalLdapConnection = useradmin.ldap_access.LdapConnection
reset = [
desktop.conf.LDAP.LDAP_URL.set_for_testing('default.example.com'),
desktop.conf.LDAP.BIND_PASSWORD_SCRIPT.set_for_testing(
'%s -c "print \'\\n default password \\n\'"' % sys.executable
),
desktop.conf.LDAP.LDAP_SERVERS.set_for_testing({
'test': {
'ldap_url': 'test.example.com',
'bind_password_script':
'%s -c "print \'\\n test password \\n\'"' % sys.executable,
}
})
]
try:
useradmin.ldap_access.LdapConnection = MockLdapConnection
connection = useradmin.ldap_access.get_connection_from_server()
assert_equal(connection.password, ' default password ')
connection = useradmin.ldap_access.get_connection_from_server('test')
assert_equal(connection.password, ' test password ')
finally:
useradmin.ldap_access.LdapConnection = OriginalLdapConnection
for f in reset:
f()
def test_last_activity():
c = make_logged_in_client(username="test", is_superuser=True)
profile = UserProfile.objects.get(user__username='test')
assert_not_equal(profile.last_activity, 0)
|
vmax-feihu/hue
|
apps/useradmin/src/useradmin/tests.py
|
Python
|
apache-2.0
| 40,666
|
[
"MOE"
] |
35d6cdedf4e86670d07d3d8a5ae5666dd92317796de71fdac8011dbd43311ee5
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s
short_description: Manage Kubernetes (K8s) objects
version_added: "2.6"
author:
- "Chris Houseknecht (@chouseknecht)"
- "Fabian von Feilitzsch (@fabianvf)"
description:
- Use the OpenShift Python client to perform CRUD operations on K8s objects.
- Pass the object definition from a source file or inline. See examples for reading
files and using Jinja templates or vault-encrypted files.
- Access to the full range of K8s APIs.
- Use the M(k8s_facts) module to obtain a list of items about an object of type C(kind).
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
extends_documentation_fragment:
- k8s_state_options
- k8s_name_options
- k8s_resource_options
- k8s_auth_options
options:
merge_type:
description:
- Whether to override the default patch merge approach with a specific type. By default, the strategic
merge will typically be used.
- For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
want to use C(merge) if you see "strategic merge patch format is not supported".
- See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
- Requires openshift >= 0.6.2
- If more than one merge_type is given, the merge_types will be tried in order.
- If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
is simply C(strategic-merge).
choices:
- json
- merge
- strategic-merge
type: list
version_added: "2.7"
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Create a k8s namespace
k8s:
name: testing
api_version: v1
kind: Namespace
state: present
- name: Create a Service object from an inline definition
k8s:
state: present
definition:
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
- name: Create a Service object by reading the definition from a file
k8s:
state: present
src: /testing/service.yml
- name: Remove an existing Service object
k8s:
state: absent
api_version: v1
kind: Service
namespace: testing
name: web
# Passing the object definition from a file
- name: Create a Deployment by reading the definition from a local file
k8s:
state: present
src: /testing/deployment.yml
- name: >-
Read definition file from the Ansible controller file system.
If the definition file has been encrypted with Ansible Vault it will automatically be decrypted.
k8s:
state: present
definition: "{{ lookup('file', '/testing/deployment.yml') }}"
- name: Read definition file from the Ansible controller file system after Jinja templating
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') }}"
'''
RETURN = '''
result:
description:
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
items:
description: Returned only when multiple yaml documents are passed to src or resource_definition
returned: when resource_definition or src contains list of objects
type: list
'''
from ansible.module_utils.k8s.raw import KubernetesRawModule
def main():
KubernetesRawModule().execute_module()
if __name__ == '__main__':
main()
|
alexlo03/ansible
|
lib/ansible/modules/clustering/k8s/k8s.py
|
Python
|
gpl-3.0
| 5,058
|
[
"Galaxy"
] |
7260e9c1cc171e2c7addbde4ca4a479536d23652d48fbe86f1b535a8645ef148
|
#! /usr/bin/python
import json, subprocess
from argparse import ArgumentParser
from os import walk
from os.path import join, getsize
from datetime import datetime
parser = ArgumentParser(description="Tracks any changes in a specified directory. Additions, deletions,\n \
changes of files and subdirs are tracked and recorded in a log file.\n \
If user-defined thresholds are exceeded an alert is also created.\n")
parser.add_argument("-d","--dir", dest="dir", required=True, help="The directory monitored (required parameter)")
parser.add_argument("-s","--sizeabs", dest="size_abs", default=30, type=float, help="Number of MB of change in size to trigger an alert (default=30)")
parser.add_argument("-r","--sizerel", dest="size_rel", default=0.05, type=float, help="Fraction of change in size to trigger an alert (default=0.05)")
parser.add_argument("-n","--numabs", dest="num_abs", default=50, type=int, help="Number files+dirs that need to be added/deleted to trigger an alert (default=50)")
parser.add_argument("-q","--numrel", dest="num_rel", default=0.05, type=float, help="Fraction of files+dirs that need to be added/deleted to trigger an alert (default=0.05)")
parser.add_argument("-l","--logdir", dest="log_dir", default="logs/", help="The directory where log files and status information is kept (default logs/)")
parser.add_argument("--schedule", dest="daySchedule", default="", help="Defines a series of times to run the script. The argument is a continuous string with no spaces, and times are comma-separated given in this form: HH:MM,HH:MM,.. (NOT currently implemented)")
parser.add_argument("--persistentAlert", action='store_true', help="If this flag is set then the alert to the user is a foreground window that requires pressing OK to dismiss")
args = parser.parse_args()
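# Illustrative invocation (hypothetical script name and paths): monitor
# /data/projects, alert on more than 50 MB or 100 added/deleted entries,
# and require the user to dismiss the alert:
#   python track_changes.py -d /data/projects -s 50 -n 100 --persistentAlert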
'''
In order to find all the changes done to a folder we have to keep a detailed snapshot of the folder structure.
This means we need to know all directories and files the main directory contains (along with individual item sizes)
as well as the same information for all subdirs (recursively). Note that we rely only on file size to determine
whether a file has changed, not on a hash generated from the file. This is acceptable because the main application of
this script is to track size changes. The script is designed to be run periodically as an agent/daemon, and thus
should keep functioning across shutdowns and restarts. Hence we use two files to store the needed information.
One is a .json file recording the last state snapshot: <log_dir>/Track<dir path>.json
The other is a log of the changes we find over time: <log_dir>/Track<dir path>changes.log
'''
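# Illustrative snapshot layout (hypothetical paths): the JSON state file maps
# each directory to [subdir_names, {filename: size}], mirroring current_state:
#   {"./src": [["utils"], {"main.py": 1024}],
#    "./src/utils": [[], {"helpers.py": 512}]}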
class Tracker:
def __init__(self, root, log_dir=''):
self.root = root # the directory that we will monitor and track changes
self.log_dir = log_dir # the directory where the logs and json files will be kept
self.previous_state = self.readPrevState() # a structure to hold all dir and file info of our previous snapshot
self.current_state = {} # a structure to hold all dir and file info of the current state
self.added_dirs = [] # a list of directories added along with file and subdir info
self.deleted_dirs = [] # a list of directories deleted along with the total size and number of files in them
self.added_files = {} # a dictionary of files added. filename is the key, size is the value
self.deleted_files = {} # a dictionary of files deleted. filename is the key, size is the value
self.changed_files = {} # a dictionary of files changed. filename is the key, a tuple (old_size, new_size) is the value
self.added_total_size = 0 # The total size in bytes of all files added
self.deleted_total_size = 0 # The total size in bytes of all files deleted
self.changed_total_size = 0 # The total size in bytes of all files changes
self.added_total_num = 0 # The total number of all files added
self.deleted_total_num = 0 # The total number of all files deleted
self.changed_total_num = 0 # The total number of all files changed
self.current_total_size = 0 # The total size of all files inside the tracked dir
self.current_total_file_num = 0 # The total number of all files inside the tracked dir
self.current_total_dir_num = 0 # The total number of all subdirs inside the tracked dir
self.summary = '' # A string to contain a summary of the additions/deletions/changes
'''
Read the previous state of the root directory from a special file.
If the file does not exist or is corrupted, return an empty dict
'''
def readPrevState(self):
prev_state_filename = 'track{}.json'.format(self.root.replace('/','_'))
try:
with open(join(self.log_dir, prev_state_filename)) as state_file:
return json.load(state_file) # note: strings are returned as unicode strings
except (IOError, ValueError):
return {}
'''
Write the current state of the root directory to the special file. Overwrite file.
Note that all strings will be written as unicode. Non-ascii characters will be written as codepoints in ascii
(e.g., the letter alpha will be written as u'\u03b1'). We could write them as non-ascii utf-8 characters by
using the parameter ensure_ascii=False, but when reading this file the json.load() method will return unicode
strings anyway. Moreover, it is best practice to work with unicode strings, instead of a particular encoding.
'''
def writeCurrentState(self):
prev_state_filename = 'track{}.json'.format(self.root.replace('/','_'))
with open(join(self.log_dir, prev_state_filename), 'w') as state_file:
json.dump(self.current_state, state_file, encoding='utf-8', separators=(',', ':'))
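# For example (ensure_ascii defaults to True, so non-ascii keys are escaped):
#   json.dumps({u'\u03b1': 1})  ->  '{"\\u03b1": 1}'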
'''
A function to return the total size of files and number of files of a deleted directory.
This implies that we are searching inside previous_state. The pathname should be a unicode string.
'''
def getSizeAndNum(self, pathname):
# Check if pathname is unicode. Left unchecked, the rest of the function's code will silently fail
# to find a regular string with non-ascii chars inside the previous_state dict.
if type(pathname) is not unicode:
raise ValueError('pathname needs to be a unicode string, you have passed:', type(pathname))
if pathname not in self.previous_state: return (0, 0)
dirs, files_with_sizes = self.previous_state[pathname]
total_size = sum(files_with_sizes.values())
total_num = len(files_with_sizes)
# Recursively visit all subdirs
for d in dirs:
subdir_size, subdir_num = self.getSizeAndNum(join(pathname, d))
total_size += subdir_size
total_num += subdir_num
return (total_size, total_num)
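# With the illustrative layout shown above the class, getSizeAndNum(u'/data')
# would recurse into /data/photos and return (120 + 2048 + 4096, 3) = (6264, 3).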
'''
A function to find all additions, deletions, and changes in all files and subdirs
'''
def findChanges(self):
# reset the current state
self.current_state = {}
for path, curr_dirs, curr_files in walk(self.root):
# convert the path to Unicode, keep the original path to be used in finding filesizes
path_unicode = path.decode('utf-8')
# convert dir names to Unicode
curr_dirs = [d.decode('utf-8') for d in curr_dirs]
# Find the sizes of all the files in this directory, and create a dictionary.
# Use a dictionary comprehension. Store filenames in Unicode
curr_files_and_sizes = {fname.decode('utf-8'): getsize(join(path, fname)) for fname in curr_files}
# update the total counts
self.current_total_size += sum(curr_files_and_sizes.values())
self.current_total_file_num += len(curr_files_and_sizes)
self.current_total_dir_num += 1
# update the current state
self.current_state[path_unicode] = [curr_dirs, curr_files_and_sizes]
# check whether this path existed in the previous state
if path_unicode in self.previous_state:
prev_dirs, prev_files_and_sizes = self.previous_state[path_unicode]
# check if sub dirs are the same
if prev_dirs != curr_dirs:
# we only want to get the deleted dirs here, since the
# added ones will appear in the path as we walk the tree
deleted_dirs_list = list(set(prev_dirs)-set(curr_dirs))
for dname in deleted_dirs_list:
# use a recursive function to get the total size and file number in the deleted dir
dir_size, dir_file_num = self.getSizeAndNum(join(path_unicode, dname))
self.deleted_dirs.append([join(path_unicode, dname), dir_size, dir_file_num])
# check if files are the same
if prev_files_and_sizes != curr_files_and_sizes:
# find the differences
deleted_files_set = set(prev_files_and_sizes) - set(curr_files_and_sizes)
for f in deleted_files_set:
self.deleted_files[join(path_unicode,f)] = prev_files_and_sizes[f]
added_files_set = set(curr_files_and_sizes) - set(prev_files_and_sizes)
for f in added_files_set:
self.added_files[join(path_unicode,f)] = curr_files_and_sizes[f]
common_files_set = set(curr_files_and_sizes) - added_files_set
for f in common_files_set:
if prev_files_and_sizes[f] != curr_files_and_sizes[f]:
self.changed_files[join(path_unicode,f)] = (prev_files_and_sizes[f], curr_files_and_sizes[f])
else:
self.added_dirs.append([path_unicode, curr_dirs, curr_files_and_sizes])
# Finally calculate various aggregates:
# sum up the deleted files sizes, and count the files
self.deleted_total_size = sum(self.deleted_files.values())
self.deleted_total_num = len(self.deleted_files)
# add the total sizes and numbers of deleted directories
self.deleted_total_size += sum([size for p, size, num in self.deleted_dirs])
self.deleted_total_num += sum([num for p, size, num in self.deleted_dirs])
# sum up the added files sizes, and count the files
self.added_total_size = sum(self.added_files.values())
self.added_total_num = len(self.added_files)
# add the total sizes and numbers of added directories
self.added_total_size += sum([ sum(f.values()) for p, d, f in self.added_dirs])
self.added_total_num += sum([ len(f) for p, d, f in self.added_dirs])
# for changed files sum up the differences between old and new sizes for every changed file
self.changed_total_size = sum(abs(old_size - new_size) for old_size, new_size in self.changed_files.values())
self.changed_total_num = len(self.changed_files)
'''
Write all changes to the log file. Record the timestamp, summary of changes, and detailed list of changes.
If no changes happened, only the timestamp is recorded. If no previous snapshot is present we record
summary information of the new directory tracked.
'''
def writeChanges(self):
change_log_filename = 'track{}changes.log'.format(self.root.replace('/','_') )
with open(join(self.log_dir, change_log_filename), 'ab') as log_file:
# write a timestamp
log_file.write('\n---------------- {} ----------------\n'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
# check to see if we had no previous state, in which case we do not want to record all additions
if not self.previous_state:
log_file.write('New directory tracked.\n(Or .json file holding the previous state was deleted or corrupted)\n')
log_file.write('{} dirs and {} files occupying {}\n'.format(self.current_total_dir_num,
self.current_total_file_num,
self.humanReadableSize(self.current_total_size)))
else:
# provide a summary of the changes
report_for_added = ''; report_for_deleted = ''; report_for_changed = ''
if self.added_total_size > 0:
report_for_added = 'Added: {} files totalling {}\n'.format(self.added_total_num, self.humanReadableSize(self.added_total_size))
if self.deleted_total_size > 0:
report_for_deleted = 'Deleted: {} files totalling {}\n'.format(self.deleted_total_num, self.humanReadableSize(self.deleted_total_size))
if self.changed_total_size > 0:
report_for_changed = 'Changed: {} files changed by {}\n'.format(self.changed_total_num, self.humanReadableSize(self.changed_total_size))
self.summary = report_for_added + report_for_deleted + report_for_changed
log_file.write(self.summary)
# provide a detailed list of all changes
# start with deleted dirs,
for d in self.deleted_dirs:
path, size, files_num = d
log_file.write('Deleted dir: {} contained {} in {} files\n'.format(path.encode('utf-8'), self.humanReadableSize(size), files_num))
# then added dirs
for d in self.added_dirs:
path, dirs, files_with_sizes = d
size = sum(files_with_sizes.values())
files_num = len(files_with_sizes)
log_file.write('Added dir: {}, contains {} in {} files\n'.format(path.encode('utf-8'), self.humanReadableSize(size), files_num))
# continuing with deleted/added/changed files
for f, size in self.deleted_files.iteritems():
log_file.write('Deleted file: {} was {} bytes\n'.format(f.encode('utf-8'), size))
for f, size in self.added_files.iteritems():
log_file.write('Added file: {} is {} bytes\n'.format(f.encode('utf-8'), size))
for f, (old_size, new_size) in self.changed_files.iteritems():
log_file.write('Changed file: {} from {} to {} bytes\n'.format(f.encode('utf-8'), old_size, new_size))
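# Illustrative excerpt of the log this method produces (hypothetical values):
#
#   ---------------- 2015-06-01 12:00:00 ----------------
#   Added: 1 files totalling 2 KB
#   Added file: /data/photos/c.jpg is 2048 bytes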
'''
A function to decide whether to alert the user and what kind of alert to present (persistent, or notification)
Note that size_abs is expressed in bytes, not MB
'''
def alertUser(self, size_abs, size_rel, num_abs, num_rel, persistentAlert):
if not self.previous_state: return
size = self.added_total_size + self.deleted_total_size + self.changed_total_size
num = self.added_total_num + self.deleted_total_num + self.changed_total_num
if (size > size_abs or
size > size_rel * self.current_total_size or
num > num_abs or
num > num_rel * self.current_total_file_num):
applescript = 'display notification "{}" with title "Boulis Directory Tracker"'.format(self.summary)
alt_applescript = 'display dialog "{}" with title "Boulis Directory Tracker" with icon caution buttons {{"OK"}}'.format(self.summary)
# check whether we need a persistent window or just a notification, and run the appropriate applescript
if persistentAlert:
subprocess.call("osascript -e '{}'".format(alt_applescript), shell=True)
else:
subprocess.call("osascript -e '{}'".format(applescript), shell=True)
'''
A function to print file sizes in a more human readable form (using KB, MB, GB)
It also uses variable decimal precision for different sizes: 0 decimals for bytes and KB, 2 for MB, 3 for GB.
'''
def humanReadableSize(self, num):
for unit, decimals_printed in zip(['bytes','KB','MB', 'GB'], [0, 0, 2, 3]):
if abs(num) < 1024.0:
if decimals_printed == 0:
return '{} {}'.format(int(round(num)), unit)
else:
# create the format string to fit the desired decimal precision
# use {{ and }} to escape the special characters { and }. for decimals= 3 this will return '{:.3f} {}'
format_string = '{{:.{}f}} {{}}'.format(decimals_printed)
return format_string.format(num, unit)
num /= 1024.0
# if num is still bigger than 1024 after all divisions, just use the largest unit
return '{:.3f} GB'.format(num)
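# For example (values illustrative):
#   humanReadableSize(512)          ->  '512 bytes'
#   humanReadableSize(5 * 1024**2)  ->  '5.00 MB'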
def singleRun(root, log_dir, size_abs, size_rel, num_abs, num_rel, persistentAlert):
t = Tracker(root, log_dir)
t.findChanges()
t.writeChanges()
t.writeCurrentState()
t.alertUser(size_abs, size_rel, num_abs, num_rel, persistentAlert)
def main_loop():
singleRun(args.dir, args.log_dir, args.size_abs *1024*1024, args.size_rel, args.num_abs, args.num_rel, args.persistentAlert)
# There are provisions to include multiple scheduled runs in the future, hence the parameter --schedule.
# It is not needed in the Mac OS X environment.
if __name__ == '__main__':
main_loop()
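# Hypothetical invocation (flag spellings assumed from the args.* attributes
# used above; the argparse setup presumably lives earlier in the file):
#   python trackDirFileChanges.py --dir ~/Documents --log_dir ~/logs \
#       --size_abs 100 --size_rel 0.1 --num_abs 20 --num_rel 0.05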
|
boulis/Track-Dir-Changes
|
trackDirFileChanges.py
|
Python
|
mit
| 17,685
|
[
"VisIt"
] |
55df842336becfb4a29a33d16768de3850f548b8f7715872043276a94bf15c37
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
from Plugins.Extensions.OpenWebif.local import tstrings
from json import dumps
from Plugins.Extensions.OpenWebif.controllers.views.ajax.renderevtblock import renderEvtBlock
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885499.251363
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:39 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/ajax/multiepg.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class multiepg(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(multiepg, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def channelsInBouquet(self, **KWS):
## CHEETAH: generated from #block channelsInBouquet at line 50, col 1.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<thead>
<tr>
''')
for sname, eventlist in VFN(VFFSL(SL,"events",True),"iteritems",False)(): # generated from line 53, col 2
write(u'''\t<td class="border"><div class="service"><img src="''')
_v = VFFSL(SL,"picons",True)[VFFSL(SL,"sname",True)] # u'$(picons[$sname])' on line 54, col 52
if _v is not None: write(_filter(_v, rawExpr=u'$(picons[$sname])')) # from line 54, col 52.
write(u'''" /> ''')
_v = VFFSL(SL,"sname",True) # u'$sname' on line 54, col 74
if _v is not None: write(_filter(_v, rawExpr=u'$sname')) # from line 54, col 74.
write(u'''</div></td>
''')
write(u'''</tr>
</thead>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<style>
\ttable { font-family: Verdana; font-size: 11px; }
\ttr { vertical-align: top }
\t.service { font-weight: bold; font-size: 12px; color:#fff; background-color: #1c47ae; line-height:30px; padding: 3px; white-space: nowrap; overflow: hidden; width: 184px}
\t.service img { width:50px; height:30px; float:left; margin-right:10px; }
\t.title { font-weight: bold; color: #061c37; }
\t.desc { font-size: 10px; color: #176093; }
\t.even { background-color: #dfeffc; }
\t.border { border-right: 1px solid #4297d7; }
\t.event { cursor: pointer; width: 190px; overflow:hidden; }
\t.bq { background-color: #1c478e; font-size: 11px; font-weight: bold; color: #fff; padding: 2px 4px; line-height: 18px; cursor: pointer; white-space: nowrap; }
\t.bq.selected { color: #A9D1FA; }
\t.plus { background-color: #dfeffc; font-size: 13px; font-weight: bold; color: #1c478e; padding: 2px 4px; line-height: 21px; cursor: pointer; white-space: nowrap; }
\t.plus.selected { color: #ea7409; }
\t.timer { color: #f00; font-weight: bold; font-size: 10px; }
\t.timer.disabled { color: #f80; }
\t#eventdescription { width: 375px; height: auto; position: fixed; top: 205px; left: 350px; z-index: 1000; display: none; overflow: auto; }
.fht-table,.fht-table thead,.fht-table tfoot,.fht-table tbody,.fht-table tr,.fht-table th,.fht-table td{font-size:100%;font:inherit;vertical-align:top;margin:0;padding:0}
.fht-table{border-collapse:collapse;border-spacing:0}
.fht-table-wrapper,.fht-table-wrapper .fht-thead,.fht-table-wrapper .fht-tfoot,.fht-table-wrapper .fht-fixed-column .fht-tbody,.fht-table-wrapper .fht-fixed-body .fht-tbody,.fht-table-wrapper .fht-tbody{overflow:hidden;position:relative}
.fht-table-wrapper .fht-fixed-body .fht-tbody,.fht-table-wrapper .fht-tbody{overflow:auto}
.fht-table-wrapper .fht-table .fht-cell{overflow:hidden;height:1px}
.fht-table-wrapper .fht-fixed-column,.fht-table-wrapper .fht-fixed-body{top:0;left:0;position:absolute}
.fht-table-wrapper .fht-fixed-column{z-index:1}
</style>
<table style="margin:0">
<tr>
''')
for slot in range(0,7): # generated from line 34, col 1
write(u''' <td class="plus ''')
if VFFSL(SL,"slot",True)==VFFSL(SL,"day",True) : # generated from line 35, col 21
_v = 'selected'
if _v is not None: write(_filter(_v))
else:
_v = ''
if _v is not None: write(_filter(_v))
write(u'''" js:day="''')
_v = VFFSL(SL,"slot",True) # u'$(slot)' on line 35, col 72
if _v is not None: write(_filter(_v, rawExpr=u'$(slot)')) # from line 35, col 72.
write(u'''">''')
_v = VFFSL(SL,"tstrings",True)[("day_" + (time.strftime("%w", time.localtime(time.time()+86400*slot))))] # u'$tstrings[("day_" + (time.strftime("%w", time.localtime(time.time()+86400*slot))))]' on line 35, col 81
if _v is not None: write(_filter(_v, rawExpr=u'$tstrings[("day_" + (time.strftime("%w", time.localtime(time.time()+86400*slot))))]')) # from line 35, col 81.
write(u'''</td>
''')
write(u'''</tr>
</table>
<table>
<tr>
''')
for bq in VFFSL(SL,"bouquets",True): # generated from line 42, col 1
write(u'''<td class="bq ''')
if VFFSL(SL,"bq",True)[0]==VFFSL(SL,"bref",True) : # generated from line 43, col 15
_v = 'selected'
if _v is not None: write(_filter(_v))
else:
_v = ''
if _v is not None: write(_filter(_v))
write(u'''" js:ref="''')
_v = VFFSL(SL,"quote",False)(VFFSL(SL,"bq",True)[0]) # u'$quote($bq[0])' on line 43, col 68
if _v is not None: write(_filter(_v, rawExpr=u'$quote($bq[0])')) # from line 43, col 68.
write(u'''">''')
_v = VFFSL(SL,"bq",True)[1] # u'$bq[1]' on line 43, col 84
if _v is not None: write(_filter(_v, rawExpr=u'$bq[1]')) # from line 43, col 84.
write(u'''</td>
''')
write(u'''</tr>
</table>
''')
renderEventBlock = VFFSL(SL,"renderEvtBlock",False)()
write(u'''<table cellpadding="0" cellspacing="0" id="TBL1">
''')
self.channelsInBouquet(trans=trans)
write(u'''<tbody>
''')
hasEvents = False
for slot in range(0,12): # generated from line 61, col 2
write(u'''<tr class="''')
_v = VFFSL(SL,"slot",True)%2 and 'odd' or 'even' # u"$(slot%2 and 'odd' or 'even')" on line 62, col 12
if _v is not None: write(_filter(_v, rawExpr=u"$(slot%2 and 'odd' or 'even')")) # from line 62, col 12.
write(u'''">
''')
for sname, eventlist in VFN(VFFSL(SL,"events",True),"iteritems",False)(): # generated from line 63, col 2
write(u'''<td class="border">
''')
for event in VFFSL(SL,"eventlist",True)[VFFSL(SL,"slot",True)]: # generated from line 65, col 2
write(u'''\t\t''')
_v = VFN(VFFSL(SL,"renderEventBlock",True),"render",False)(VFFSL(SL,"event",True)) # u'$renderEventBlock.render($event)' on line 66, col 3
if _v is not None: write(_filter(_v, rawExpr=u'$renderEventBlock.render($event)')) # from line 66, col 3.
write(u'''
''')
hasEvents = True
write(u'''</td>
''')
write(u'''</tr>
''')
write(u'''</tbody>
</table>
<div id="eventdescription"></div>
<div id="editTimerForm" title="''')
_v = VFFSL(SL,"tstrings",True)['edit_timer'] # u"$tstrings['edit_timer']" on line 76, col 32
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['edit_timer']")) # from line 76, col 32.
write(u'''"></div>
<script>
var picons = ''')
_v = VFFSL(SL,"dumps",False)(VFFSL(SL,"picons",True)) # u'$dumps($picons)' on line 79, col 14
if _v is not None: write(_filter(_v, rawExpr=u'$dumps($picons)')) # from line 79, col 14.
write(u''';
var reloadTimers = false;
$(".bq").click(function() {
var id = $(this).attr("js:ref");
$("#tvcontent").html(loadspinner).load(\'ajax/multiepg?bref=\'+id);
});
$(".event").click(function() {
var id = $(this).attr("js:id");
var ref = $(this).attr("js:ref");
$("#eventdescription").load(\'ajax/event?idev=\'+id+\'&sref=\'+escape(ref), function() {
\t\t$("#eventdescription").show(200).draggable( { handle: ".handle" } );
});
});
$(".plus").click(function() {
\tvar day = $(this).attr("js:day");
\t$("#tvcontent").html(loadspinner).load(\'ajax/multiepg?bref=''')
_v = VFFSL(SL,"quote",False)(VFFSL(SL,"bref",True)) # u'${quote($bref)}' on line 94, col 62
if _v is not None: write(_filter(_v, rawExpr=u'${quote($bref)}')) # from line 94, col 62.
write(u'''&day=\'+day);
});
$(\'#editTimerForm\').load(\'/ajax/edittimer\');
</script>
<script type="text/javascript" src="/js/jquery.fixedheadertable.min.js"></script>
<script>
$(function() {
$(\'#TBL1\').fixedHeaderTable({
\tfooter: true,
\tcloneHeadToFoot: true,
\taltClass: \'odd\',
\tautoShow: true
});
});
</script>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_multiepg= 'respond'
## END CLASS DEFINITION
if not hasattr(multiepg, '_initCheetahAttributes'):
templateAPIClass = getattr(multiepg, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(multiepg)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=multiepg()).run()
|
MOA-2011/enigma2-plugin-extensions-openwebif
|
plugin/controllers/views/ajax/multiepg.py
|
Python
|
gpl-2.0
| 13,136
|
[
"VisIt"
] |
fba366ad7479b53a98aa76b122f9e1d5de10a5382948b552b87a95d18b6f9baf
|
"""mbe.file_templates: (Default) file templates for dealing with PBS,
ORCA, Q-Chem, and Psi."""
def eprfile(charge, multiplicity, xyzfile):
"""
A default template for an EPR input file.
"""
return """! uks pbe0 def2-sv(p) def2-svp/jk ri rijk pmodel somf(1x) noautostart tightscf grid5
%pal
nprocs 1
end
* xyzfile {charge} {multiplicity} {xyzfile}.xyz *
%eprnmr
tol 1e-10
gtensor 1
ori centerofelcharge
printlevel 5
end
""".format(charge=charge, multiplicity=multiplicity, xyzfile=xyzfile)
def pbsfile(xyzfile):
"""
A default template for a PBS job file.
"""
return """#!/bin/bash
#PBS -N {xyzfile}
#PBS -q shared_large
#PBS -l nodes=1:ppn=1
#PBS -l walltime=144:00:00
#PBS -j oe
#PBS -l qos=low
#PBS -m abe
#PBS -M erb74@pitt.edu
module purge
module load orca/3.0.3
cp $PBS_O_WORKDIR/{xyzfile}.in $LOCAL
cp $PBS_O_WORKDIR/{xyzfile}.xyz $LOCAL
cd $LOCAL
run_on_exit() {{
set -v
cp $LOCAL/* $PBS_O_WORKDIR
}}
trap run_on_exit EXIT
$(which orca) {xyzfile}.in >& $PBS_O_WORKDIR/{xyzfile}.out
""".format(xyzfile=xyzfile)
|
berquist/mbe
|
mbe/file_templates.py
|
Python
|
mpl-2.0
| 1,077
|
[
"ORCA",
"Q-Chem"
] |
1970e1af073e737cdb99c7580412ea4f737b1949e6952b6167f0e91c1397dd53
|
#!/usr/bin/python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import glob
import re
DriverPath = ''
InsertPath = '/../../../'
if (len(sys.argv) == 2):
DriverPath = sys.argv[1] + '/'
sys.path.insert(0, os.path.abspath(os.getcwd()))
def pts(category, pyfile):
print('Auto-documenting %s file %s' % (category, pyfile))
# Main driver modules in psi4/driver
fdriver = open('source/autodoc_driver.rst', 'w')
fdriver.write('\n.. include:: /autodoc_abbr_options_c.rst\n\n')
fdriver.write('.. _`sec:driver`:\n\n')
fdriver.write('=============\n')
fdriver.write('Python Driver\n')
fdriver.write('=============\n\n')
for pyfile in glob.glob(DriverPath + '../../psi4/driver/*.py'):
filename = os.path.split(pyfile)[1]
basename = os.path.splitext(filename)[0]
div = '=' * len(basename)
if basename not in ['inpsight', 'pep8', 'diatomic_fits', 'pyparsing', 'computation_cache']:
pts('driver', basename)
fdriver.write(basename + '\n')
fdriver.write(div + '\n\n')
fdriver.write('.. automodule:: %s\n' % (basename))
fdriver.write(' :members:\n')
fdriver.write(' :undoc-members:\n')
if basename == 'driver':
fdriver.write(' :exclude-members: energy, optimize, opt, frequency, frequencies, freq, property, prop, molden, gdma, fchk, gradient, hessian\n')
elif basename == 'wrapper_database':
fdriver.write(' :exclude-members: db, database\n')
elif basename == 'driver_nbody':
fdriver.write(' :exclude-members: nbody_gufunc\n')
elif basename == 'driver_cbs':
fdriver.write(' :exclude-members: cbs, complete_basis_set, xtpl_highest_1,\n')
fdriver.write(' scf_xtpl_helgaker_3, scf_xtpl_helgaker_2, corl_xtpl_helgaker_2, n_body\n')
# elif basename == 'physconst':
# fdriver.write('\n.. literalinclude:: %sdriver/%s\n' % (IncludePath, filename))
elif basename == 'diatomic':
fdriver.write(' :exclude-members: anharmonicity\n')
# elif basename == 'interface_dftd3':
# fdriver.write(' :exclude-members: run_dftd3\n')
# elif basename == 'interface_cfour':
# fdriver.write(' :exclude-members: run_cfour\n')
elif basename == 'aliases':
fdriver.write(' :exclude-members: sherrill_gold_standard, allen_focal_point\n')
elif basename == 'p4util':
fdriver.write(' :exclude-members: oeprop, cubeprop\n')
elif basename == 'procedures':
fdriver.write(' :exclude-members: interface_cfour\n')
fdriver.write('\n')
# Python-only plugin modules in psi4/driver
for basename in next(os.walk(DriverPath + '../../psi4/driver'))[1]:
div = '=' * len(basename)
if basename not in ['grendel']:
pts('driver', basename)
fdriver.write(basename + '\n')
fdriver.write(div + '\n\n')
fdriver.write('.. automodule:: %s\n' % (basename))
fdriver.write(' :members:\n')
fdriver.write(' :undoc-members:\n')
for pyfile in glob.glob(DriverPath + '../../psi4/driver/' + basename + '/*py'):
filename = os.path.split(pyfile)[1]
basename2 = os.path.splitext(filename)[0]
div = '=' * len(basename2)
fdriver.write('.. automodule:: %s.%s\n' % (basename, basename2))
fdriver.write(' :members:\n')
fdriver.write(' :undoc-members:\n')
if basename == 'qcdb' and basename2 == 'interface_dftd3':
fdriver.write(' :exclude-members: run_dftd3\n')
fdriver.write('\n')
fdriver.write('\n')
fdriver.close()
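# Illustrative stanza this script writes into source/autodoc_driver.rst for a
# module named "driver" (rendering hypothetical, abridged):
#
#   driver
#   ======
#
#   .. automodule:: driver
#      :members:
#      :undoc-members:
#      :exclude-members: energy, optimize, ...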
|
kannon92/psi4
|
doc/sphinxman/document_driver.py
|
Python
|
gpl-2.0
| 4,584
|
[
"Psi4"
] |
ced822ada896e9fb6a4842d481ed5e8510c32d2683e796db93676a3d02feabda
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wikipedia dataset containing cleaned articles of all languages."""
import bz2
import codecs
import json
import re
import xml.etree.cElementTree as etree
from absl import logging
import six
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
from absl import flags # pylint:disable=g-bad-import-order,g-import-not-at-top
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"wikipedia_auto_select_flume_mode", True,
"If True, will automatically determine whether to run flume on borg or "
"locally based on the dump size for each language.")
_CITATION = """\
@ONLINE {wikidump,
author = "Wikimedia Foundation",
title = "Wikimedia Downloads",
url = "https://dumps.wikimedia.org"
}
"""
_DESCRIPTION = """\
Wikipedia dataset containing cleaned articles of all languages.
The datasets are built from the Wikipedia dump
(https://dumps.wikimedia.org/) with one split per language. Each example
contains the content of one full Wikipedia article with cleaning to strip
markdown and unwanted sections (references, etc.).
"""
_LICENSE = (
"This work is licensed under the Creative Commons Attribution-ShareAlike "
"3.0 Unported License. To view a copy of this license, visit "
"http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
"Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.")
# Source: https://en.wikipedia.org/wiki/List_of_Wikipedias (accessed 3/1/2019)
# Removed because no articles: hz.
WIKIPEDIA_LANGUAGES = [
"aa", "ab", "ace", "ady", "af", "ak", "als", "am", "an", "ang", "ar", "arc",
"arz", "as", "ast", "atj", "av", "ay", "az", "azb", "ba", "bar", "bat-smg",
"bcl", "be", "be-x-old", "bg", "bh", "bi", "bjn", "bm", "bn", "bo", "bpy",
"br", "bs", "bug", "bxr", "ca", "cbk-zam", "cdo", "ce", "ceb", "ch", "cho",
"chr", "chy", "ckb", "co", "cr", "crh", "cs", "csb", "cu", "cv", "cy", "da",
"de", "din", "diq", "dsb", "dty", "dv", "dz", "ee", "el", "eml", "en", "eo",
"es", "et", "eu", "ext", "fa", "ff", "fi", "fiu-vro", "fj", "fo", "fr",
"frp", "frr", "fur", "fy", "ga", "gag", "gan", "gd", "gl", "glk", "gn",
"gom", "gor", "got", "gu", "gv", "ha", "hak", "haw", "he", "hi", "hif",
"ho", "hr", "hsb", "ht", "hu", "hy", "ia", "id", "ie", "ig", "ii", "ik",
"ilo", "inh", "io", "is", "it", "iu", "ja", "jam", "jbo", "jv", "ka", "kaa",
"kab", "kbd", "kbp", "kg", "ki", "kj", "kk", "kl", "km", "kn", "ko", "koi",
"krc", "ks", "ksh", "ku", "kv", "kw", "ky", "la", "lad", "lb", "lbe", "lez",
"lfn", "lg", "li", "lij", "lmo", "ln", "lo", "lrc", "lt", "ltg", "lv",
"mai", "map-bms", "mdf", "mg", "mh", "mhr", "mi", "min", "mk", "ml", "mn",
"mr", "mrj", "ms", "mt", "mus", "mwl", "my", "myv", "mzn", "na", "nah",
"nap", "nds", "nds-nl", "ne", "new", "ng", "nl", "nn", "no", "nov", "nrm",
"nso", "nv", "ny", "oc", "olo", "om", "or", "os", "pa", "pag", "pam", "pap",
"pcd", "pdc", "pfl", "pi", "pih", "pl", "pms", "pnb", "pnt", "ps", "pt",
"qu", "rm", "rmy", "rn", "ro", "roa-rup", "roa-tara", "ru", "rue", "rw",
"sa", "sah", "sat", "sc", "scn", "sco", "sd", "se", "sg", "sh", "si",
"simple", "sk", "sl", "sm", "sn", "so", "sq", "sr", "srn", "ss", "st",
"stq", "su", "sv", "sw", "szl", "ta", "tcy", "te", "tet", "tg", "th", "ti",
"tk", "tl", "tn", "to", "tpi", "tr", "ts", "tt", "tum", "tw", "ty", "tyv",
"udm", "ug", "uk", "ur", "uz", "ve", "vec", "vep", "vi", "vls", "vo", "wa",
"war", "wo", "wuu", "xal", "xh", "xmf", "yi", "yo", "za", "zea", "zh",
"zh-classical", "zh-min-nan", "zh-yue", "zu"
]
# Use mirror (your.org) to avoid download caps.
_BASE_URL_TMPL = "https://dumps.wikimedia.your.org/{lang}wiki/{date}/"
_INFO_FILE = "dumpstatus.json"
class WikipediaConfig(tfds.core.BuilderConfig):
"""BuilderConfig for Wikipedia."""
def __init__(self, *, language=None, date=None, **kwargs):
"""BuilderConfig for Wikipedia.
Args:
language: string, the language code for the Wikipedia dump to use.
date: string, date of the Wikipedia dump in YYYYMMDD format. A list of
available dates can be found at https://dumps.wikimedia.org/enwiki/.
**kwargs: keyword arguments forwarded to super.
"""
super(WikipediaConfig, self).__init__(
name=f"{date}.{language}",
description=f"Wikipedia dataset for {language}, parsed from {date} dump.",
**kwargs)
self.date = date
self.language = language
class Wikipedia(tfds.core.BeamBasedBuilder):
"""Wikipedia dataset."""
VERSION = tfds.core.Version("1.0.0")
RELEASE_NOTES = {
"1.0.0": "New split API (https://tensorflow.org/datasets/splits)",
}
BUILDER_CONFIGS = [
WikipediaConfig(language=lang, date="20201201")
for lang in WIKIPEDIA_LANGUAGES
] + [
# Old version files do not exist anymore, but the configs are kept so
# previously generated datasets can still be read.
WikipediaConfig(language=lang, date="20200301")
for lang in WIKIPEDIA_LANGUAGES
] + [
# Old version files do not exist anymore, but the configs are kept so
# previously generated datasets can still be read.
WikipediaConfig(language=lang, date="20190301")
for lang in WIKIPEDIA_LANGUAGES
]
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"title": tfds.features.Text(),
"text": tfds.features.Text(),
}),
# No default supervised_keys.
supervised_keys=None,
homepage="https://dumps.wikimedia.org",
citation=_CITATION,
license=_LICENSE,
)
def _split_generators(self, dl_manager):
def _base_url(lang):
return _BASE_URL_TMPL.format(
lang=lang.replace("-", "_"), date=self._builder_config.date)
lang = self._builder_config.language
info_url = _base_url(lang) + _INFO_FILE
# Use dictionary since testing mock always returns the same result.
downloaded_files = dl_manager.download_and_extract({"info": info_url})
xml_urls = []
total_bytes = 0
with tf.io.gfile.GFile(downloaded_files["info"]) as f:
dump_info = json.load(f)
multistream_dump_info = dump_info["jobs"]["articlesmultistreamdump"]
assert multistream_dump_info["status"] == "done", (
"Specified dump (%s) multistream status is not 'done': %s" %
(_base_url(lang), multistream_dump_info["status"]))
for fname, info in multistream_dump_info["files"].items():
if ".xml" not in fname:
continue
total_bytes += info["size"]
xml_urls.append(_base_url(lang) + fname)
# Use dictionary since testing mock always returns the same result.
downloaded_files = dl_manager.download({"xml": xml_urls})
return {
tfds.Split.TRAIN: self._generate_examples(downloaded_files["xml"], lang)
}
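# Illustrative (abridged) shape of the dumpstatus.json parsed above; the field
# names follow the code, while the filename and size are hypothetical:
#
#   {"jobs": {"articlesmultistreamdump": {
#       "status": "done",
#       "files": {"...-pages-articles-multistream1.xml-...bz2": {"size": 123456789}}}}}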
def _generate_examples(self, filepaths, language):
"""Build PCollection of examples in the raw (text) form."""
beam = tfds.core.lazy_imports.apache_beam
def _extract_content(filepath):
"""Extracts article content from a single WikiMedia XML file."""
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath, "rb") as f:
f = bz2.BZ2File(filename=f)
if six.PY3:
# Workaround due to:
# https://github.com/tensorflow/tensorflow/issues/33563
utf_f = codecs.getreader("utf-8")(f) # pytype: disable=wrong-arg-types
else:
utf_f = f
# To clear root, to free-up more memory than just `elem.clear()`.
context = etree.iterparse(utf_f, events=("end",)) # pytype: disable=wrong-arg-types
context = iter(context)
unused_event, root = next(context)
for unused_event, elem in context:
if not elem.tag.endswith("page"):
continue
namespace = elem.tag[:-4]
title = elem.find("./{0}title".format(namespace)).text
ns = elem.find("./{0}ns".format(namespace)).text
id_ = elem.find("./{0}id".format(namespace)).text
# Filter pages that are not in the "main" namespace.
if ns != "0":
root.clear()
continue
raw_content = elem.find(
"./{0}revision/{0}text".format(namespace)).text
root.clear()
# Filter redirects.
if raw_content is None or raw_content.lower().startswith("#redirect"):
beam.metrics.Metrics.counter(language, "filtered-redirects").inc()
continue
beam.metrics.Metrics.counter(language, "extracted-examples").inc()
yield (id_, title, raw_content)
def _clean_content(inputs):
"""Cleans raw wikicode to extract text."""
id_, title, raw_content = inputs
try:
text = _parse_and_clean_wikicode(raw_content)
except (tfds.core.lazy_imports.mwparserfromhell.parser.ParserError) as e:
beam.metrics.Metrics.counter(language, "parser-error").inc()
logging.error("mwparserfromhell ParseError: %s", e)
return
if not text:
beam.metrics.Metrics.counter(language, "empty-clean-examples").inc()
return
beam.metrics.Metrics.counter(language, "cleaned-examples").inc()
yield id_, {"title": title, "text": text}
return (beam.Create(filepaths)
| beam.FlatMap(_extract_content)
| beam.transforms.Reshuffle()
| beam.FlatMap(_clean_content))
def _parse_and_clean_wikicode(raw_content):
"""Strips formatting and unwanted sections from raw page content."""
wikicode = tfds.core.lazy_imports.mwparserfromhell.parse(raw_content)
# Filters for references, tables, and file/image links.
re_rm_wikilink = re.compile(
"^(?:File|Image|Media):", flags=re.IGNORECASE | re.UNICODE)
def rm_wikilink(obj):
return bool(re_rm_wikilink.match(six.text_type(obj.title))) # pytype: disable=wrong-arg-types
def rm_tag(obj):
return six.text_type(obj.tag) in {"ref", "table"}
def rm_template(obj):
return obj.name.lower() in {
"reflist", "notelist", "notelist-ua", "notelist-lr", "notelist-ur",
"notelist-lg"
}
def try_remove_obj(obj, section):
try:
section.remove(obj)
except ValueError:
# For unknown reasons, objects are sometimes not found.
pass
section_text = []
# Filter individual sections to clean.
for section in wikicode.get_sections(
flat=True, include_lead=True, include_headings=True):
for obj in section.ifilter_wikilinks(matches=rm_wikilink, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_templates(matches=rm_template, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_tags(matches=rm_tag, recursive=True):
try_remove_obj(obj, section)
section_text.append(section.strip_code().strip())
return "\n\n".join(section_text)
|
tensorflow/datasets
|
tensorflow_datasets/text/wikipedia.py
|
Python
|
apache-2.0
| 11,591
|
[
"VisIt"
] |
eb9d98b03e9bb2e5ffb1b02ab53231a227282b42da12f70fbad1f485a8971a1e
|
"""
Provides functions for working with and visualizing scalar ARMA processes.
TODO: 1. Fix warnings concerning casting complex variables back to floats
"""
import numpy as np
from numpy import conj
from .util import check_random_state
class ARMA:
r"""
This class represents scalar ARMA(p, q) processes.
If phi and theta are scalars, then the model is
understood to be
.. math::
X_t = \phi X_{t-1} + \epsilon_t + \theta \epsilon_{t-1}
where :math:`\epsilon_t` is a white noise process with standard
deviation :math:`\sigma`. If phi and theta are arrays or sequences,
then the interpretation is the ARMA(p, q) model
.. math::
X_t = \phi_1 X_{t-1} + ... + \phi_p X_{t-p} +
\epsilon_t + \theta_1 \epsilon_{t-1} + ... +
\theta_q \epsilon_{t-q}
where
* :math:`\phi = (\phi_1, \phi_2,..., \phi_p)`
* :math:`\theta = (\theta_1, \theta_2,..., \theta_q)`
* :math:`\sigma` is a scalar, the standard deviation of the
white noise
Parameters
----------
phi : scalar or iterable or array_like(float)
Autocorrelation values for the autocorrelated variable.
See above for explanation.
theta : scalar or iterable or array_like(float)
Autocorrelation values for the white noise of the model.
See above for explanation
sigma : scalar(float)
The standard deviation of the white noise
Attributes
----------
phi, theta, sigma : see Parameters
ar_poly : array_like(float)
The polynomial form that is needed by scipy.signal to do the
processing we desire. Corresponds with the phi values
ma_poly : array_like(float)
The polynomial form that is needed by scipy.signal to do the
processing we desire. Corresponds with the theta values
"""
def __init__(self, phi, theta=0, sigma=1):
self._phi, self._theta = phi, theta
self.sigma = sigma
self.set_params()
def __repr__(self):
m = "ARMA(phi=%s, theta=%s, sigma=%s)"
return m % (self.phi, self.theta, self.sigma)
def __str__(self):
m = "An ARMA({p}, {q}) process"
p = np.asarray(self.phi).size
q = np.asarray(self.theta).size
return m.format(p=p, q=q)
# Special latex print method for working in notebook
def _repr_latex_(self):
m = r"$X_t = "
phi = np.atleast_1d(self.phi)
theta = np.atleast_1d(self.theta)
rhs = ""
for (tm, phi_p) in enumerate(phi):
# don't include terms if they are equal to zero
if abs(phi_p) > 1e-12:
rhs += r"%+g X_{t-%i}" % (phi_p, tm+1)
if rhs.startswith("+"):  # also guards against an empty rhs when all phi are zero
    rhs = rhs[1:]  # remove initial `+` if phi_1 was positive
rhs += r" + \epsilon_t"
for (tm, th_q) in enumerate(theta):
# don't include terms if they are equal to zero
if abs(th_q) > 1e-12:
rhs += r"%+g \epsilon_{t-%i}" % (th_q, tm+1)
return m + rhs + "$"
@property
def phi(self):
return self._phi
@phi.setter
def phi(self, new_value):
self._phi = new_value
self.set_params()
@property
def theta(self):
return self._theta
@theta.setter
def theta(self, new_value):
self._theta = new_value
self.set_params()
def set_params(self):
r"""
Internally, scipy.signal works with systems of the form
.. math::
ar_{poly}(L) X_t = ma_{poly}(L) \epsilon_t
where L is the lag operator. To match this, we set
.. math::
ar_{poly} = (1, -\phi_1, -\phi_2,..., -\phi_p)
ma_{poly} = (1, \theta_1, \theta_2,..., \theta_q)
In addition, ar_poly must be at least as long as ma_poly.
This can be achieved by padding it out with zeros when required.
"""
# === set up ma_poly === #
ma_poly = np.asarray(self._theta)
self.ma_poly = np.insert(ma_poly, 0, 1) # The array (1, theta)
# === set up ar_poly === #
if np.isscalar(self._phi):
ar_poly = np.array(-self._phi)
else:
ar_poly = -np.asarray(self._phi)
self.ar_poly = np.insert(ar_poly, 0, 1) # The array (1, -phi)
# === pad ar_poly with zeros if required === #
if len(self.ar_poly) < len(self.ma_poly):
temp = np.zeros(len(self.ma_poly) - len(self.ar_poly))
self.ar_poly = np.hstack((self.ar_poly, temp))
def impulse_response(self, impulse_length=30):
"""
Get the impulse response corresponding to our model.
Returns
-------
psi : array_like(float)
psi[j] is the response at lag j of the impulse response.
We take psi[0] as unity.
"""
from scipy.signal import dimpulse
sys = self.ma_poly, self.ar_poly, 1
times, psi = dimpulse(sys, n=impulse_length)
psi = psi[0].flatten() # Simplify return value into flat array
return psi
def spectral_density(self, two_pi=True, res=1200):
r"""
Compute the spectral density function. The spectral density is
the discrete time Fourier transform of the autocovariance
function. In particular,
.. math::
f(w) = \sum_k \gamma(k) \exp(-ikw)
where gamma is the autocovariance function and the sum is over
the set of all integers.
Parameters
----------
two_pi : Boolean, optional
Compute the spectral density function over :math:`[0, \pi]` if
two_pi is False and :math:`[0, 2 \pi]` otherwise. Default value is
True
res : scalar or array_like(int), optional(default=1200)
If res is a scalar then the spectral density is computed at
`res` frequencies evenly spaced around the unit circle, but
if res is an array then the function computes the response
at the frequencies given by the array
Returns
-------
w : array_like(float)
The normalized frequencies at which h was computed, in
radians/sample
spect : array_like(float)
The frequency response
"""
from scipy.signal import freqz
w, h = freqz(self.ma_poly, self.ar_poly, worN=res, whole=two_pi)
spect = h * conj(h) * self.sigma**2
return w, spect
def autocovariance(self, num_autocov=16):
"""
Compute the autocovariance function from the ARMA parameters
over the integers range(num_autocov) using the spectral density
and the inverse Fourier transform.
Parameters
----------
num_autocov : scalar(int), optional(default=16)
The number of autocovariances to calculate
"""
spect = self.spectral_density()[1]
acov = np.fft.ifft(spect).real
# num_autocov should be <= len(acov) / 2
return acov[:num_autocov]
def simulation(self, ts_length=90, random_state=None):
"""
Compute a simulated sample path assuming Gaussian shocks.
Parameters
----------
ts_length : scalar(int), optional(default=90)
Number of periods to simulate for
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
vals : array_like(float)
A simulation of the model that corresponds to this class
"""
from scipy.signal import dlsim
random_state = check_random_state(random_state)
sys = self.ma_poly, self.ar_poly, 1
u = random_state.randn(ts_length, 1) * self.sigma
vals = dlsim(sys, u)[1]
return vals.flatten()
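# Minimal usage sketch (illustrative parameters; run from within the package,
# since this module relies on a relative import):
#
#   lp = ARMA(phi=0.5, theta=0.2, sigma=1.0)
#   print(lp)                                     # "An ARMA(1, 1) process"
#   psi = lp.impulse_response(impulse_length=10)  # psi[0] == 1.0
#   w, spect = lp.spectral_density(res=8)
#   x = lp.simulation(ts_length=100, random_state=42)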
|
oyamad/QuantEcon.py
|
quantecon/arma.py
|
Python
|
bsd-3-clause
| 8,055
|
[
"Gaussian"
] |
8313c22b07ee1a597bae0a10d2e0bc1c67d38863f8d5154ee29e653bd818215d
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Kyle A Beauchamp
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import tempfile, os
import functools
from mdtraj.utils import enter_temp_directory
from mdtraj.testing import get_fn, eq, assert_raises, SkipTest
import numpy as np
import mdtraj as md
import mdtraj.utils
from mdtraj.utils import six
from mdtraj.utils.six.moves import xrange
from mdtraj.core import element
fn = get_fn('traj.h5')
nat = get_fn('native.pdb')
tmpfns = {}
for suffix, (fd, temp) in {
'xtc' : tempfile.mkstemp(suffix='.xtc'),
'dcd' : tempfile.mkstemp(suffix='.dcd'),
'binpos' : tempfile.mkstemp(suffix='.binpos'),
'trr' : tempfile.mkstemp(suffix='.trr'),
'h5' : tempfile.mkstemp(suffix='.h5'),
'pdb' : tempfile.mkstemp(suffix='.pdb'),
'pdb.gz' : tempfile.mkstemp(suffix='.pdb.gz'),
'nc' : tempfile.mkstemp(suffix='.nc'),
'lh5' : tempfile.mkstemp(suffix='.lh5'),
'lammpstrj' : tempfile.mkstemp(suffix='.lammpstrj'),
'xyz' : tempfile.mkstemp(suffix='.xyz')}.items():
os.close(fd)
tmpfns[suffix] = temp
def teardown_module(module):
"""remove the temporary file created by tests in this file
this gets automatically called by nose"""
for e in tmpfns.values():
os.unlink(e)
def test_mismatch():
# loading a 22 atoms xtc with a topology that has 2,000 atoms
# some kind of error should happen!
assert_raises(ValueError, lambda: md.load(get_fn('frame0.xtc'), top=get_fn('4K6Q.pdb')))
def test_box():
t = md.load(get_fn('native.pdb'))
yield lambda: eq(t.unitcell_vectors, None)
yield lambda: eq(t.unitcell_lengths, None)
yield lambda: eq(t.unitcell_angles, None)
yield lambda: eq(t.unitcell_volumes, None)
t.unitcell_vectors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).reshape(1, 3, 3)
yield lambda: eq(np.array([1.0, 1.0, 1.0]), t.unitcell_lengths[0])
yield lambda: eq(np.array([90.0, 90.0, 90.0]), t.unitcell_angles[0])
yield lambda: eq(np.array([1.0]), t.unitcell_volumes)
def test_load_pdb_box():
t = md.load(get_fn('native2.pdb'))
yield lambda: eq(t.unitcell_lengths[0], np.array([0.1, 0.2, 0.3]))
yield lambda: eq(t.unitcell_angles[0], np.array([90.0, 90.0, 90.0]))
yield lambda: eq(t.unitcell_vectors[0], np.array([[0.1, 0, 0], [0, 0.2, 0], [0, 0, 0.3]]))
def test_load_pdb_gz():
t = md.load(get_fn('1ncw.pdb.gz'))
yield lambda: eq(t.n_atoms, 3990)
def test_box_load_save():
t = md.load(get_fn('native2.pdb'))
# these four tempfile have extensions (dcd, xtc, trr, h5) that
# should store the box information. lets make sure than through a load/save
# cycle, the box information is preserved:
for temp_fn in [tmpfns['xtc'], tmpfns['dcd'], tmpfns['trr'], tmpfns['h5']]:
t.save(temp_fn)
if temp_fn.endswith('.h5'):
t2 = md.load(temp_fn)
else:
t2 = md.load(temp_fn, top=get_fn('native.pdb'))
assert t.unitcell_vectors is not None
yield lambda: eq(t.xyz, t2.xyz, decimal=3)
yield lambda: eq(t.unitcell_vectors, t2.unitcell_vectors)
yield lambda: eq(t.unitcell_angles, t2.unitcell_angles)
yield lambda: eq(t.unitcell_lengths, t2.unitcell_lengths)
def test_slice():
t = md.load(fn)
yield lambda: eq((t[0:5] + t[5:10]).xyz, t[0:10].xyz)
yield lambda: eq((t[0:5] + t[5:10]).time, t[0:10].time)
yield lambda: eq((t[0:5] + t[5:10]).unitcell_vectors, t[0:10].unitcell_vectors)
yield lambda: eq((t[0:5] + t[5:10]).unitcell_lengths, t[0:10].unitcell_lengths)
yield lambda: eq((t[0:5] + t[5:10]).unitcell_angles, t[0:10].unitcell_angles)
def test_slice2():
t = md.load(get_fn('traj.h5'))
yield lambda: t[0] == t[[0,1]][0]
def test_xtc():
t = md.load(get_fn('frame0.xtc'), top=nat)
for e in [tmpfns['xtc'], tmpfns['dcd'], tmpfns['binpos'], tmpfns['trr'], tmpfns['h5'], tmpfns['pdb'], tmpfns['pdb.gz'], tmpfns['nc']]:
def f():
t.save(e)
t2 = md.load(e, top=nat)
eq(t.xyz, t2.xyz, err_msg=e)
# only trr and xtc save the time that we read from the original
# xtc format
if e.endswith('.trr') or e.endswith('.xtc'):
eq(t.time, t2.time, err_msg=e)
yield f
def test_dcd():
t = md.load(get_fn('frame0.dcd'), top=nat)
for e in [tmpfns['xtc'], tmpfns['dcd'], tmpfns['binpos'], tmpfns['trr'], tmpfns['h5'], tmpfns['pdb'], tmpfns['pdb.gz'], tmpfns['nc']]:
def f():
t.save(e)
t2 = md.load(e, top=nat)
eq(t.xyz, t2.xyz, err_msg=e)
eq(t.time, t2.time, err_msg=e)
yield f
def test_dtr():
t = md.load(get_fn('ala_dipeptide_trj/clickme.dtr'), top=get_fn('ala_dipeptide.pdb'))
for e in [tmpfns['xtc'], tmpfns['dcd'], tmpfns['binpos'], tmpfns['trr'], tmpfns['h5'], tmpfns['pdb'], tmpfns['pdb.gz'], tmpfns['nc']]:
def f():
t.save(e)
t2 = md.load(e, top=get_fn('ala_dipeptide.pdb'))
# change decimal to 3 since the precision differs between trajectory
# formats
eq(t.xyz, t2.xyz, decimal=3, err_msg=e)
#eq(t.time, t2.time, err_msg=e)
yield f
def test_binpos():
t = md.load(get_fn('frame0.binpos'), top=nat)
for e in [tmpfns['xtc'], tmpfns['dcd'], tmpfns['binpos'], tmpfns['trr'], tmpfns['h5'], tmpfns['pdb'], tmpfns['pdb.gz'], tmpfns['nc']]:
def f():
t.save(e)
t2 = md.load(e, top=nat)
eq(t.xyz, t2.xyz, err_msg=e)
eq(t.time, t2.time, err_msg=e)
yield f
def test_load():
filenames = ["frame0.xtc", "frame0.trr", "frame0.dcd", "frame0.binpos",
"traj.h5", 'legacy_msmbuilder_trj0.lh5', 'frame0.nc', six.u('traj.h5'),
"frame0.lammpstrj", "frame0.xyz"]
num_block = 3
for filename in filenames:
t0 = md.load(get_fn(filename), top=nat, discard_overlapping_frames=True)
t1 = md.load(get_fn(filename), top=nat, discard_overlapping_frames=False)
t2 = md.load([get_fn(filename) for i in xrange(num_block)], top=nat, discard_overlapping_frames=False)
t3 = md.load([get_fn(filename) for i in xrange(num_block)], top=nat, discard_overlapping_frames=True)
# these don't actually overlap, so discard_overlapping_frames should have no effect
# the overlap is between the last frame of one and the first frame of the next.
yield lambda: eq(t0.n_frames, t1.n_frames)
yield lambda: eq(t0.n_frames * num_block, t2.n_frames)
yield lambda: eq(t3.n_frames, t2.n_frames)
def test_hdf5_0():
t = md.load(get_fn('traj.h5'))
t2 = md.load(get_fn('native.pdb'))
t3 = md.load(get_fn('traj.h5'), frame=8)
print (t.topology, t2.topology)
assert t.topology == t2.topology
yield lambda: eq(t.time, 0.002*(1 + np.arange(100)))
yield lambda: eq(t.time, 0.002*(1 + np.arange(100)))
yield lambda: eq(t[8].xyz, t3.xyz)
yield lambda: eq(t[8].time, t3.time)
yield lambda: eq(t[8].unitcell_vectors, t3.unitcell_vectors)
def test_center():
traj = md.load(get_fn('traj.h5'))
traj.center_coordinates()
mu = traj.xyz.mean(1)
mu0 = np.zeros(mu.shape)
eq(mu0, mu)
for a in traj.top.atoms:
#a.element.mass = 1.0 # Set all masses equal so we can compare against unweighted result
a.element = element.hydrogen
traj.center_coordinates(mass_weighted=True)
mu2 = traj.xyz.mean(1)
eq(mu0, mu2)
def test_center_aind():
traj = md.load(get_fn('traj.h5'))
traj.restrict_atoms(np.arange(0, traj.n_atoms, 2))
traj.center_coordinates()
mu = traj.xyz.mean(1)
mu0 = np.zeros(mu.shape)
eq(mu0, mu)
for a in traj.top.atoms:
#a.element.mass = 1.0 # Set all masses equal so we can compare against unweighted result
a.element = element.hydrogen
traj.center_coordinates(mass_weighted=True)
mu2 = traj.xyz.mean(1)
eq(mu0, mu2)
def test_float_atom_indices_exception():
"Is an informative error message given when you supply floats for atom_indices?"
top = md.load(get_fn('native.pdb')).topology
for ext in md._FormatRegistry.loaders.keys():
try:
fn = get_fn('frame0' + ext)
except:
continue
try:
md.load(fn, atom_indices=[0.5, 1.3], top=top)
except ValueError as e:
if six.PY3:
assert e.args[0] == 'indices must be of an integer type. float64 is not an integer type'
else:
assert e.message == 'indices must be of an integer type. float64 is not an integer type'
except Exception as e:
raise
def test_restrict_atoms():
traj = md.load(get_fn('traj.h5'))
time_address = traj.time.ctypes.data
desired_atom_indices = [0,1,2,5]
traj.restrict_atoms(desired_atom_indices)
atom_indices = [a.index for a in traj.top.atoms]
eq([0,1,2,3], atom_indices)
eq(traj.xyz.shape[1], 4)
eq(traj.n_atoms, 4)
eq(traj.n_residues, 1)
eq(len(traj.top._bonds), 2)
eq(traj.n_residues, traj.topology._numResidues)
eq(traj.n_atoms, traj.topology._numAtoms)
eq(np.array([a.index for a in traj.topology.atoms]), np.arange(traj.n_atoms))
# assert that the time field was not copied
assert traj.time.ctypes.data == time_address
def test_restrict_atoms_not_inplace():
traj = md.load(get_fn('traj.h5'))
traj_backup = md.load(get_fn('traj.h5'))
desired_atom_indices = [0,1,2,5]
sliced = traj.restrict_atoms(desired_atom_indices, inplace=False)
# make sure the original one was not modified
eq(traj.xyz, traj_backup.xyz)
eq(traj.topology, traj_backup.topology)
eq(list(range(4)), [a.index for a in sliced.top.atoms])
eq(sliced.xyz.shape[1], 4)
eq(sliced.n_atoms, 4)
eq(sliced.n_residues, 1)
eq(len(sliced.top._bonds), 2)
eq(sliced.n_residues, sliced.topology._numResidues)
eq(sliced.n_atoms, sliced.topology._numAtoms)
eq(np.array([a.index for a in sliced.topology.atoms]), np.arange(sliced.n_atoms))
# make sure the two don't alias the same memory
assert traj.time.ctypes.data != sliced.time.ctypes.data
assert traj.unitcell_angles.ctypes.data != sliced.unitcell_angles.ctypes.data
assert traj.unitcell_lengths.ctypes.data != sliced.unitcell_lengths.ctypes.data
def test_array_vs_matrix():
top = md.load(get_fn('native.pdb')).topology
xyz = np.random.randn(1, 22, 3)
xyz_mat = np.matrix(xyz)
t1 = md.Trajectory(xyz, top)
t2 = md.Trajectory(xyz_mat, top)
eq(t1.xyz, xyz)
eq(t2.xyz, xyz)
def test_pdb_unitcell_loadsave():
"""Make sure that nonstandard unitcell dimensions are saved and loaded
correctly with PDB"""
tref = md.load(get_fn('native.pdb'))
tref.unitcell_lengths = 1 + 0.1 * np.random.randn(tref.n_frames, 3)
tref.unitcell_angles = 90 + 0.0 * np.random.randn(tref.n_frames, 3)
tref.save(tmpfns['pdb'])
tnew = md.load(tmpfns['pdb'])
eq(tref.unitcell_vectors, tnew.unitcell_vectors, decimal=3)
def test_load_combination():
"Test that the load function's stride and atom_indices work across all trajectory formats"
topology = md.load(get_fn('native.pdb')).topology
ainds = np.array([a.index for a in topology.atoms if a.element.symbol == 'C'])
filenames = ['frame0.binpos', 'frame0.dcd', 'frame0.trr', 'frame0.xtc',
'frame0.nc', 'frame0.h5', 'frame0.pdb', 'legacy_msmbuilder_trj0.lh5',
'frame0.lammpstrj', 'frame0.xyz']
no_kwargs = [md.load(fn, top=topology) for fn in map(get_fn, filenames)]
strided3 = [md.load(fn, top=topology, stride=3) for fn in map(get_fn, filenames)]
subset = [md.load(fn, top=topology, atom_indices=ainds) for fn in map(get_fn, filenames)]
for i, (t1, t2) in enumerate(zip(no_kwargs, strided3)):
yield lambda: eq(t1.xyz[::3], t2.xyz)
yield lambda: eq(t1.time[::3], t2.time)
if t1.unitcell_vectors is not None:
yield lambda: eq(t1.unitcell_vectors[::3], t2.unitcell_vectors)
yield lambda: eq(t1.topology, t2.topology)
for i, (t1, t2) in enumerate(zip(no_kwargs, subset)):
yield lambda: eq(t1.xyz[:, ainds, :], t2.xyz)
yield lambda: eq(t1.time, t2.time)
if t1.unitcell_vectors is not None:
yield lambda: eq(t1.unitcell_vectors, t2.unitcell_vectors)
yield lambda: eq(t1.topology.subset(ainds), t2.topology)
def test_no_topology():
"We can make trajectories without a topology"
md.Trajectory(xyz=np.random.randn(10,5,3), topology=None)
def test_join():
xyz = np.random.rand(10,5,3)
# overlapping frames
t1 = md.Trajectory(xyz=xyz[:5], topology=None)
t2 = md.Trajectory(xyz=xyz[4:], topology=None)
t3 = t1.join(t2, discard_overlapping_frames=True)
t4 = t1.join(t2, discard_overlapping_frames=False)
eq(t3.xyz, xyz)
eq(len(t4.xyz), 11)
eq(t4.xyz, np.vstack((xyz[:5], xyz[4:])))
def test_stack_1():
t1 = md.load(get_fn('native.pdb'))
t2 = t1.stack(t1)
eq(t2.n_atoms, 2*t1.n_atoms)
eq(t2.topology._numAtoms, 2*t1.n_atoms)
eq(t1.xyz, t2.xyz[:, 0:t1.n_atoms])
eq(t1.xyz, t2.xyz[:, t1.n_atoms:])
def test_stack_2():
t1 = md.Trajectory(xyz=np.random.rand(10,5,3), topology=None)
t2 = md.Trajectory(xyz=np.random.rand(10,6,3), topology=None)
t3 = t1.stack(t2)
eq(t3.xyz[:, :5], t1.xyz)
eq(t3.xyz[:, 5:], t2.xyz)
eq(t3.n_atoms, 11)
def test_seek_read_mode():
# Test the seek/tell capacity of the different TrajectoryFile objects in
# read mode. Basically, we just seek around the files and read different
# segments, keeping track of our location manually and checking with both
# tell() and by checking that the right coordinates are actually returned
files = [(md.formats.NetCDFTrajectoryFile, 'frame0.nc'),
(md.formats.HDF5TrajectoryFile, 'frame0.h5'),
(md.formats.XTCTrajectoryFile, 'frame0.xtc'),
(md.formats.TRRTrajectoryFile, 'frame0.trr'),
(md.formats.DCDTrajectoryFile, 'frame0.dcd'),
(md.formats.MDCRDTrajectoryFile, 'frame0.mdcrd'),
(md.formats.BINPOSTrajectoryFile, 'frame0.binpos'),
(md.formats.LH5TrajectoryFile, 'legacy_msmbuilder_trj0.lh5'),
(md.formats.DTRTrajectoryFile, 'frame0.dtr/clickme.dtr'),
(md.formats.XYZTrajectoryFile, 'frame0.xyz'),
(md.formats.LAMMPSTrajectoryFile, 'frame0.lammpstrj'),]
for a, b in files:
point = 0
xyz = md.load(get_fn(b), top=get_fn('native.pdb')).xyz
length = len(xyz)
kwargs = {}
if a is md.formats.MDCRDTrajectoryFile:
kwargs = {'n_atoms': 22}
with a(get_fn(b), **kwargs) as f:
for i in range(100):
r = np.random.rand()
if r < 0.25:
offset = np.random.randint(-5, 5)
if 0 < point + offset < length:
point += offset
f.seek(offset, 1)
else:
f.seek(0)
point = 0
if r < 0.5:
offset = np.random.randint(1, 10)
if point + offset < length:
read = f.read(offset)
if a not in [md.formats.BINPOSTrajectoryFile, md.formats.LH5TrajectoryFile,
md.formats.XYZTrajectoryFile]:
read = read[0]
readlength = len(read)
read = mdtraj.utils.in_units_of(read, f.distance_unit, 'nanometers')
eq(xyz[point:point+offset], read)
point += readlength
elif r < 0.75:
offset = np.random.randint(low=-100, high=0)
try:
f.seek(offset, 2)
point = length + offset
except NotImplementedError:
                        # not all of the *TrajectoryFiles currently support
                        # seeking from the end, so we'll let this pass if they
                        # say that they don't implement it
pass
else:
offset = np.random.randint(100)
f.seek(offset, 0)
point = offset
eq(f.tell(), point)
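# Minimal illustrative sketch (not one of the original tests) of the seek/tell
# convention exercised by test_seek_read_mode above; `whence` follows the usual
# file semantics: 0 = absolute frame, 1 = relative to current, 2 = from the end.
def _example_seek_semantics(f):
    f.seek(5, 0)   # jump to frame 5
    f.seek(-2, 1)  # step back two frames -> frame 3
    assert f.tell() == 3
    f.seek(-1, 2)  # last frame; some formats raise NotImplementedError here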
def test_load_frame():
files = ['frame0.nc', 'frame0.h5', 'frame0.xtc', 'frame0.trr',
'frame0.dcd', 'frame0.mdcrd', 'frame0.binpos',
'legacy_msmbuilder_trj0.lh5', 'frame0.xyz', 'frame0.lammpstrj']
trajectories = [md.load(get_fn(f), top=get_fn('native.pdb')) for f in files]
rand = [np.random.randint(len(t)) for t in trajectories]
frames = [md.load_frame(get_fn(f), index=r, top=get_fn('native.pdb')) for f, r in zip(files, rand)]
for traj, frame, r, f in zip(trajectories, frames, rand, files):
def test():
eq(traj[r].xyz, frame.xyz)
eq(traj[r].unitcell_vectors, frame.unitcell_vectors)
eq(traj[r].time, frame.time, err_msg='%d, %d: %s' % (traj[r].time[0], frame.time[0], f))
test.description = 'test_load_frame: %s' % f
yield test
t1 = md.load(get_fn('2EQQ.pdb'))
r = np.random.randint(len(t1))
t2 = md.load_frame(get_fn('2EQQ.pdb'), r)
eq(t1[r].xyz, t2.xyz)
def test_iterload():
t_ref = md.load(get_fn('frame0.h5'))[:20]
with enter_temp_directory():
for ext in t_ref._savers().keys():
            # skip formats that can store only a single frame per file
if ext in ('.ncrst', '.rst7'):
continue
fn = 'temp%s' % ext
t_ref.save(fn)
def test():
for stride in [1, 2, 3]:
loaded = md.load(fn, top=t_ref, stride=stride)
iterloaded = functools.reduce(lambda a, b: a.join(b), md.iterload(fn, top=t_ref, stride=stride, chunk=6))
eq(loaded.xyz, iterloaded.xyz)
eq(loaded.time, iterloaded.time)
eq(loaded.unitcell_angles, iterloaded.unitcell_angles)
eq(loaded.unitcell_lengths, iterloaded.unitcell_lengths)
test.description = 'test_iterload: %s' % ext
yield test
def test_iterload_skip():
files = ['frame0.nc', 'frame0.h5', 'frame0.xtc', 'frame0.trr',
'frame0.dcd', 'frame0.binpos', 'legacy_msmbuilder_trj0.lh5',
'frame0.xyz', 'frame0.lammpstrj']
err_msg = "failed for file %s with chunksize %i and skip %i"
for file in files:
for cs in [0, 1, 11, 100]:
for skip in [0, 1, 20, 101]:
print("testing file %s with skip=%i" % (file, skip))
t_ref = md.load(get_fn(file), top=get_fn('native.pdb'))
t = functools.reduce(lambda a, b: a.join(b),
md.iterload(get_fn(file), skip=skip,
top=get_fn('native.pdb'), chunk=cs))
eq(t_ref.xyz[skip:], t.xyz, err_msg=err_msg % (file, cs, skip))
eq(t_ref.time[skip:], t.time, err_msg=err_msg % (file, cs, skip))
eq(t_ref.topology, t.topology, err_msg=err_msg % (file, cs, skip))
def test_save_load():
# this cycles all the known formats you can save to, and then tries
# to reload, using just a single-frame file.
t_ref = md.load(get_fn('native.pdb'))
t_ref.unitcell_vectors = np.array([[[1,0,0], [0,1,0], [0,0,1]]])
with enter_temp_directory():
for ext in t_ref._savers().keys():
def test():
fn = 'temp%s' % ext
t_ref.save(fn)
t = md.load(fn, top=t_ref.topology)
eq(t.xyz, t_ref.xyz)
eq(t.time, t_ref.time)
if t._have_unitcell:
eq(t.unitcell_angles, t_ref.unitcell_angles)
eq(t.unitcell_lengths, t_ref.unitcell_lengths)
test.description = 'test_save_load: %s' % ext
yield test
def test_length():
files = ['frame0.nc', 'frame0.h5', 'frame0.xtc', 'frame0.trr',
'frame0.mdcrd', '4waters.arc', 'frame0.dcd', '2EQQ.pdb',
'frame0.binpos', 'legacy_msmbuilder_trj0.lh5',
'frame0.lammpstrj', 'frame0.xyz']
for file in files:
if file.endswith('.mdcrd'):
kwargs = {'n_atoms': 22}
else:
kwargs = {}
def f():
try:
eq(len(md.open(get_fn(file), **kwargs)),
len(md.load(get_fn(file), top=get_fn('native.pdb'))))
except NotImplementedError as e:
raise SkipTest(e)
f.description = 'Length of file object: %s' % file
yield f
def test_unitcell():
    # make sure that bogus unitcell vectors are not saved
top = md.load(get_fn('native.pdb')).restrict_atoms(range(5)).topology
t = md.Trajectory(xyz=np.random.randn(100, 5, 3), topology=top)
for e in [tmpfns['xtc'], tmpfns['dcd'], tmpfns['binpos'], tmpfns['trr'], tmpfns['h5'], tmpfns['pdb'], tmpfns['pdb.gz'], tmpfns['nc']]:
t.save(e)
        # bind e as a default argument so each yielded test checks its own file;
        # also use e (not the undefined name fn) when building the description
        f = lambda e=e: eq(md.load(e, top=top).unitcell_vectors, None)
        f.description = 'unitcell preservation in %s' % os.path.splitext(e)[1]
yield f
def test_chunk0_iterload():
filename = 'frame0.h5'
trj0 = md.load(get_fn(filename))
for trj in md.iterload(get_fn(filename), chunk=0):
pass
eq(trj0.n_frames, trj.n_frames)
|
hainm/mdtraj
|
mdtraj/tests/test_trajectory.py
|
Python
|
lgpl-2.1
| 22,638
|
[
"MDTraj"
] |
5e12d473ef08458013d7bab4ec2cca6ee3b882b9ff28cc2ca460a082adbb3258
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
from customdelegate import CustomDelegate, DocumentationMetaclass
from camelot.view.controls import editors
from camelot.core.utils import variant_to_pyobject
from camelot.view.proxy import ValueLoading
class IntegerDelegate(CustomDelegate):
"""Custom delegate for integer values"""
__metaclass__ = DocumentationMetaclass
editor = editors.IntegerEditor
def __init__(self,
parent=None,
unicode_format = None,
**kwargs):
CustomDelegate.__init__(self, parent=parent, **kwargs)
self.unicode_format = unicode_format
self.locale = QtCore.QLocale()
def paint(self, painter, option, index):
painter.save()
self.drawBackground(painter, option, index)
value = variant_to_pyobject(index.model().data(index, Qt.EditRole))
if value in (None, ValueLoading):
value_str = ''
else:
value_str = self.locale.toString( long(value) )
if self.unicode_format is not None:
value_str = self.unicode_format(value)
self.paint_text( painter, option, index, value_str, horizontal_align=Qt.AlignRight )
painter.restore()
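# Illustrative usage sketch (hypothetical view name, not part of this module):
# the delegate attaches to a view column through the standard Qt API, e.g.
#
#   view = QtGui.QTableView()
#   view.setItemDelegateForColumn(0, IntegerDelegate(parent=view))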
|
jeroendierckx/Camelot
|
camelot/view/controls/delegates/integerdelegate.py
|
Python
|
gpl-2.0
| 2,390
|
[
"VisIt"
] |
ff0d5ab8c01b2d0b79c5ff932b0e99434d9e4491bb8482da8bbe9c5353a1617a
|
import sys,os,commands
def get_interface(pdb,interfaceid):
cmd="curl --compressed http://eppic-web.org/ewui/ewui/fileDownload?type=interface\&id=%s\&interface=%s > %s-%s.%s.pdb"%(pdb,interfaceid,pdb,pdb,interfaceid)
os.system(cmd)
chains=commands.getoutput("cat %s-%s.%s.pdb | grep SEQRES | awk '{print $3}' | sort | uniq"%(pdb,pdb,interfaceid)).split("\n")
return chains
def get_pdbinterfaces(pdb1,interfaceid1,pdb2,interfaceid2):
chain1=get_interface(pdb1,interfaceid1)
chain2=get_interface(pdb2,interfaceid2)
fname="%s_%s-%s_%s.pml"%(pdb1,interfaceid1,pdb2,interfaceid2)
f=open(fname,'w')
f.write("reinitialize\n")
f.write("cd /home/baskaran_k/asym\n")
f.write("load %s-%s.%s.pdb\n"%(pdb1,pdb1,interfaceid1))
f.write("load %s-%s.%s.pdb\n"%(pdb2,pdb2,interfaceid2))
f.write("show cartoon\n")
f.write("hide lines\n")
f.write("align %s-%s.%s//%s//, %s-%s.%s//%s//\n"%(pdb1,pdb1,interfaceid1,chain1[0],pdb2,pdb2,interfaceid2,chain2[0]))
f.write("center\n")
f.write("color cyan, %s-%s.%s//%s//\n"%(pdb1,pdb1,interfaceid1,chain1[0]))
f.write("color yellow, %s-%s.%s//%s//\n"%(pdb1,pdb1,interfaceid1,chain1[1]))
f.write("color green, %s-%s.%s//%s//\n"%(pdb2,pdb2,interfaceid2,chain2[0]))
f.write("color red, %s-%s.%s//%s//\n"%(pdb2,pdb2,interfaceid2,chain2[1]))
f.close()
os.system("pymol %s"%(fname))
if __name__=="__main__":
pdb1=sys.argv[1]
interfaceid1=sys.argv[2]
pdb2=sys.argv[3]
interfaceid2=sys.argv[4]
get_pdbinterfaces(pdb1,interfaceid1,pdb2,interfaceid2)
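# Example invocation (hypothetical PDB ids and interface numbers):
#   python ss.py 2trx 1 1xoa 2
# downloads both EPPIC interfaces, writes 2trx_1-1xoa_2.pml and opens it in PyMOL.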
|
kumar-physics/general-scripts
|
python/ss.py
|
Python
|
gpl-3.0
| 1,525
|
[
"PyMOL"
] |
b270279ad6c455469d81d0da5544d2b84db080aae003145c791d35b900a9485a
|
#!/usr/bin/env python
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
# plt.plot(x, y, 'o', xi, yi-sin(xi), ':')
# plt.title(function)
# plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
    # subtract the linear trend and make sure there are no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
yield check_rbf1d_stability, function
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
def test_rbf_epsilon_none_collinear():
    # Check that collinear points in one dimension don't cause an error
# due to epsilon = 0
x = [1, 2, 3]
y = [4, 4, 4]
z = [5, 6, 7]
rbf = Rbf(x, y, z, epsilon=None)
assert_(rbf.epsilon > 0)
if __name__ == "__main__":
run_module_suite()
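# Minimal usage sketch of the interface exercised above (illustrative only):
#
#   from scipy.interpolate import Rbf
#   rbf = Rbf(x, sin(x), function='thin-plate')
#   yi = rbf(linspace(0, 10, 100))  # evaluate the interpolant between the nodes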
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/scipy/interpolate/tests/test_rbf.py
|
Python
|
gpl-2.0
| 4,626
|
[
"Gaussian"
] |
8399b41cd50fb0c60b3a3eef1f3d4acb979ba09951457876f9996730713e4d27
|
# -*- coding: utf-8 -*-
"""
Bok choy acceptance tests for Entrance exams in the LMS
"""
from textwrap import dedent
from common.test.acceptance.tests.helpers import UniqueCourseTest
from common.test.acceptance.pages.studio.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
class EntranceExamTest(UniqueCourseTest):
"""
Base class for tests of Entrance Exams in the LMS.
"""
USERNAME = "joe_student"
EMAIL = "joe@example.com"
def setUp(self):
super(EntranceExamTest, self).setUp()
self.xqueue_grade_response = None
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with a hierarchy and problems
course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name'],
settings={
'entrance_exam_enabled': 'true',
'entrance_exam_minimum_score_pct': '50'
}
)
problem = self.get_problem()
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(problem)
)
).install()
entrance_exam_subsection = None
outline = course_fixture.studio_course_outline_as_json
for child in outline['child_info']['children']:
if child.get('display_name') == "Entrance Exam":
entrance_exam_subsection = child['child_info']['children'][0]
if entrance_exam_subsection:
course_fixture.create_xblock(entrance_exam_subsection['id'], problem)
# Auto-auth register for the course.
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=False).visit()
def get_problem(self):
""" Subclasses should override this to complete the fixture """
raise NotImplementedError()
class EntranceExamPassTest(EntranceExamTest):
"""
Tests the scenario when a student passes entrance exam.
"""
def get_problem(self):
"""
Create a multiple choice problem
"""
xml = dedent("""
<problem>
<multiplechoiceresponse>
<label>What is height of eiffel tower without the antenna?.</label>
<choicegroup type="MultipleChoice">
<choice correct="false">324 meters<choicehint>Antenna is 24 meters high</choicehint></choice>
<choice correct="true">300 meters</choice>
<choice correct="false">224 meters</choice>
<choice correct="false">400 meters</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'HEIGHT OF EIFFEL TOWER', data=xml)
def test_course_is_unblocked_as_soon_as_student_passes_entrance_exam(self):
"""
Scenario: Ensure that entrance exam status message is updated and courseware is unblocked as soon as
student passes entrance exam.
Given I have a course with entrance exam as pre-requisite
When I pass entrance exam
Then I can see complete TOC of course
And I can see message indicating my pass status
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.wait_for_page().problem_name, 'HEIGHT OF EIFFEL TOWER')
self.assertTrue(self.courseware_page.has_entrance_exam_message())
self.assertFalse(self.courseware_page.has_passed_message())
problem_page.click_choice('choice_1')
problem_page.click_submit()
self.courseware_page.wait_for_page()
self.assertTrue(self.courseware_page.has_passed_message())
self.assertEqual(self.courseware_page.chapter_count_in_navigation, 2)
|
romain-li/edx-platform
|
common/test/acceptance/tests/lms/test_lms_entrance_exams.py
|
Python
|
agpl-3.0
| 4,146
|
[
"VisIt"
] |
3114109d55897e76d63f077d5115250765763f86c2ac4ad8933517f2e7488b45
|
##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing WPS, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Andreas Hilboll (University of Bremen)
"""
import os
import re
import tempfile
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.netcdf import set_netcdf_env_vars
from easybuild.easyblocks.wrf import det_wrf_subdir
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.filetools import apply_regex_substitutions, change_dir, copy_file, extract_file, mkdir
from easybuild.tools.filetools import patch_perl_script_autoflush, remove_dir, symlink
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_WPS(EasyBlock):
"""Support for building/installing WPS."""
def __init__(self, *args, **kwargs):
"""Add extra config options specific to WPS."""
super(EB_WPS, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.comp_fam = None
self.compile_script = None
testdata_urls = ["http://www2.mmm.ucar.edu/wrf/src/data/avn_data.tar.gz"]
if LooseVersion(self.version) < LooseVersion('3.8'):
# 697MB download, 16GB unpacked!
testdata_urls.append("http://www2.mmm.ucar.edu/wrf/src/wps_files/geog.tar.gz")
elif LooseVersion(self.version) < LooseVersion('4.0'):
# 2.3GB download!
testdata_urls.append("http://www2.mmm.ucar.edu/wrf/src/wps_files/geog_complete.tar.gz")
else:
# 2.6GB download, 29GB unpacked!!
testdata_urls.append("http://www2.mmm.ucar.edu/wrf/src/wps_files/geog_high_res_mandatory.tar.gz")
if self.cfg.get('testdata') is None:
self.cfg['testdata'] = testdata_urls
if LooseVersion(self.version) < LooseVersion('4.0'):
self.wps_subdir = 'WPS'
else:
self.wps_subdir = 'WPS-%s' % self.version
@staticmethod
def extra_options():
extra_vars = {
'buildtype': [None, "Specify the type of build (smpar: OpenMP, dmpar: MPI).", MANDATORY],
'runtest': [True, "Build and run WPS tests", CUSTOM],
'testdata': [None, "URL to test data required to run WPS test", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Configure build:
- set required environment variables (for netCDF, JasPer)
- patch compile script and ungrib Makefile for non-default install paths of WRF and JasPer
- run configure script and figure how to select desired build option
- patch configure.wps file afterwards to fix 'serial compiler' setting
"""
# netCDF dependency check + setting env vars (NETCDF, NETCDFF)
set_netcdf_env_vars(self.log)
# WRF dependency check
wrf = get_software_root('WRF')
if wrf:
wrfdir = os.path.join(wrf, det_wrf_subdir(get_software_version('WRF')))
else:
raise EasyBuildError("WRF module not loaded?")
self.compile_script = 'compile'
if LooseVersion(self.version) >= LooseVersion('4.0.3'):
# specify install location of WRF via $WRF_DIR (supported since WPS 4.0.3)
# see https://github.com/wrf-model/WPS/pull/102
env.setvar('WRF_DIR', wrfdir)
else:
# patch compile script so that WRF is found
regex_subs = [(r"^(\s*set\s*WRF_DIR_PRE\s*=\s*)\${DEV_TOP}(.*)$", r"\1%s\2" % wrfdir)]
apply_regex_substitutions(self.compile_script, regex_subs)
# libpng dependency check
libpng = get_software_root('libpng')
zlib = get_software_root('zlib')
if libpng:
paths = [libpng]
if zlib:
paths.insert(0, zlib)
libpnginc = ' '.join(['-I%s' % os.path.join(path, 'include') for path in paths])
libpnglib = ' '.join(['-L%s' % os.path.join(path, 'lib') for path in paths])
else:
# define these as empty, assume that libpng will be available via OS (e.g. due to --filter-deps=libpng)
libpnglib = ""
libpnginc = ""
# JasPer dependency check + setting env vars
jasper = get_software_root('JasPer')
if jasper:
env.setvar('JASPERINC', os.path.join(jasper, "include"))
jasperlibdir = os.path.join(jasper, "lib")
env.setvar('JASPERLIB', jasperlibdir)
jasperlib = "-L%s" % jasperlibdir
else:
raise EasyBuildError("JasPer module not loaded?")
# patch ungrib Makefile so that JasPer is found
jasperlibs = "%s -ljasper %s -lpng" % (jasperlib, libpnglib)
regex_subs = [
(r"^(\s*-L\.\s*-l\$\(LIBTARGET\))(\s*;.*)$", r"\1 %s\2" % jasperlibs),
(r"^(\s*\$\(COMPRESSION_LIBS\))(\s*;.*)$", r"\1 %s\2" % jasperlibs),
]
apply_regex_substitutions(os.path.join('ungrib', 'src', 'Makefile'), regex_subs)
# patch arch/Config.pl script, so that run_cmd_qa receives all output to answer questions
patch_perl_script_autoflush(os.path.join("arch", "Config.pl"))
# configure
# determine build type option to look for
self.comp_fam = self.toolchain.comp_family()
build_type_option = None
if LooseVersion(self.version) >= LooseVersion("3.4"):
knownbuildtypes = {
'smpar': 'serial',
'dmpar': 'dmpar'
}
if self.comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
build_type_option = " Linux x86_64, Intel compiler"
elif self.comp_fam == toolchain.GCC: # @UndefinedVariable
if LooseVersion(self.version) >= LooseVersion("3.6"):
build_type_option = "Linux x86_64, gfortran"
else:
build_type_option = "Linux x86_64 g95"
else:
raise EasyBuildError("Don't know how to figure out build type to select.")
else:
knownbuildtypes = {
'smpar': 'serial',
'dmpar': 'DM parallel'
}
if self.comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
build_type_option = "PC Linux x86_64, Intel compiler"
elif self.comp_fam == toolchain.GCC: # @UndefinedVariable
build_type_option = "PC Linux x86_64, gfortran compiler,"
knownbuildtypes['dmpar'] = knownbuildtypes['dmpar'].upper()
else:
raise EasyBuildError("Don't know how to figure out build type to select.")
# check and fetch selected build type
bt = self.cfg['buildtype']
if bt not in knownbuildtypes.keys():
raise EasyBuildError("Unknown build type: '%s'. Supported build types: %s", bt, knownbuildtypes.keys())
# fetch option number based on build type option and selected build type
        build_type_question = r"\s*(?P<nr>[0-9]+).\s*%s\s*\(?%s\)?\s*\n" % (build_type_option, knownbuildtypes[bt])
cmd = "./configure"
qa = {}
no_qa = [".*compiler is.*"]
std_qa = {
# named group in match will be used to construct answer
r"%s(.*\n)*Enter selection\s*\[[0-9]+-[0-9]+\]\s*:" % build_type_question: "%(nr)s",
}
run_cmd_qa(cmd, qa, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)
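        # Illustrative example (assumed menu text, not actual configure output):
        # if the menu prints "  3.  Linux x86_64, gfortran   (dmpar)", the named
        # group 'nr' captures '3', so the answer template "%(nr)s" expands to "3".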
# make sure correct compilers and compiler flags are being used
comps = {
'SCC': "%s -I$(JASPERINC) %s" % (os.getenv('CC'), libpnginc),
'SFC': os.getenv('F90'),
'DM_FC': os.getenv('MPIF90'),
'DM_CC': os.getenv('MPICC'),
'FC': os.getenv('MPIF90'),
'CC': os.getenv('MPICC'),
}
if self.toolchain.options.get('openmp', None):
comps.update({'LDFLAGS': '%s %s' % (self.toolchain.get_flag('openmp'), os.environ['LDFLAGS'])})
regex_subs = [(r"^(%s\s*=\s*).*$" % key, r"\1 %s" % val) for (key, val) in comps.items()]
apply_regex_substitutions('configure.wps', regex_subs)
def build_step(self):
"""Build in install dir using compile script."""
cmd = "./%s" % self.compile_script
run_cmd(cmd, log_all=True, simple=True)
def test_step(self):
"""Run WPS test (requires large dataset to be downloaded). """
wpsdir = None
def run_wps_cmd(cmdname, mpi_cmd=True):
"""Run a WPS command, and check for success."""
cmd = os.path.join(wpsdir, "%s.exe" % cmdname)
if mpi_cmd:
if build_option('mpi_tests'):
cmd = self.toolchain.mpi_cmd_for(cmd, 1)
else:
self.log.info("Skipping MPI test for %s, since MPI tests are disabled", cmd)
return
(out, _) = run_cmd(cmd, log_all=True, simple=False)
re_success = re.compile("Successful completion of %s" % cmdname)
if not re_success.search(out):
raise EasyBuildError("%s.exe failed (pattern '%s' not found)?", cmdname, re_success.pattern)
if self.cfg['runtest']:
if not self.cfg['testdata']:
raise EasyBuildError("List of URLs for testdata not provided.")
wpsdir = os.path.join(self.builddir, self.wps_subdir)
try:
# create temporary directory
tmpdir = tempfile.mkdtemp()
change_dir(tmpdir)
# download data
testdata_paths = []
for testdata in self.cfg['testdata']:
path = self.obtain_file(testdata)
if not path:
raise EasyBuildError("Downloading file from %s failed?", testdata)
testdata_paths.append(path)
# unpack data
for path in testdata_paths:
srcdir = extract_file(path, tmpdir, change_into_dir=False)
change_dir(srcdir)
namelist_file = os.path.join(tmpdir, 'namelist.wps')
# GEOGRID
# setup directories and files
if LooseVersion(self.version) < LooseVersion("4.0"):
geog_data_dir = "geog"
else:
geog_data_dir = "WPS_GEOG"
for dir_name in os.listdir(os.path.join(tmpdir, geog_data_dir)):
symlink(os.path.join(tmpdir, geog_data_dir, dir_name), os.path.join(tmpdir, dir_name))
# copy namelist.wps file and patch it for geogrid
copy_file(os.path.join(wpsdir, 'namelist.wps'), namelist_file)
regex_subs = [(r"^(\s*geog_data_path\s*=\s*).*$", r"\1 '%s'" % tmpdir)]
apply_regex_substitutions(namelist_file, regex_subs)
# GEOGRID.TBL
geogrid_dir = os.path.join(tmpdir, 'geogrid')
mkdir(geogrid_dir)
symlink(os.path.join(wpsdir, 'geogrid', 'GEOGRID.TBL.ARW'),
os.path.join(geogrid_dir, 'GEOGRID.TBL'))
# run geogrid.exe
run_wps_cmd("geogrid")
# UNGRIB
# determine start and end time stamps of grib files
grib_file_prefix = "fnl_"
k = len(grib_file_prefix)
fs = [f for f in sorted(os.listdir('.')) if f.startswith(grib_file_prefix)]
start = "%s:00:00" % fs[0][k:]
end = "%s:00:00" % fs[-1][k:]
# copy namelist.wps file and patch it for ungrib
copy_file(os.path.join(wpsdir, 'namelist.wps'), namelist_file)
regex_subs = [
(r"^(\s*start_date\s*=\s*).*$", r"\1 '%s','%s'," % (start, start)),
(r"^(\s*end_date\s*=\s*).*$", r"\1 '%s','%s'," % (end, end)),
]
apply_regex_substitutions(namelist_file, regex_subs)
# copy correct Vtable
vtable_dir = os.path.join(wpsdir, 'ungrib', 'Variable_Tables')
if os.path.exists(os.path.join(vtable_dir, 'Vtable.ARW')):
copy_file(os.path.join(vtable_dir, 'Vtable.ARW'), os.path.join(tmpdir, 'Vtable'))
elif os.path.exists(os.path.join(vtable_dir, 'Vtable.ARW.UPP')):
copy_file(os.path.join(vtable_dir, 'Vtable.ARW.UPP'), os.path.join(tmpdir, 'Vtable'))
else:
raise EasyBuildError("Could not find Vtable file to use for testing ungrib")
# run link_grib.csh script
cmd = "%s %s*" % (os.path.join(wpsdir, "link_grib.csh"), grib_file_prefix)
run_cmd(cmd, log_all=True, simple=True)
# run ungrib.exe
run_wps_cmd("ungrib", mpi_cmd=False)
# METGRID.TBL
metgrid_dir = os.path.join(tmpdir, 'metgrid')
mkdir(metgrid_dir)
symlink(os.path.join(wpsdir, 'metgrid', 'METGRID.TBL.ARW'),
os.path.join(metgrid_dir, 'METGRID.TBL'))
# run metgrid.exe
run_wps_cmd('metgrid')
# clean up
change_dir(self.builddir)
remove_dir(tmpdir)
except OSError as err:
raise EasyBuildError("Failed to run WPS test: %s", err)
# installing is done in build_step, so we can run tests
def install_step(self):
"""Building was done in install dir, so just do some cleanup here."""
# make sure JASPER environment variables are unset
env_vars = ['JASPERINC', 'JASPERLIB']
for env_var in env_vars:
if env_var in os.environ:
os.environ.pop(env_var)
def sanity_check_step(self):
"""Custom sanity check for WPS."""
custom_paths = {
'files': [os.path.join(self.wps_subdir, x) for x in ['geogrid.exe', 'metgrid.exe', 'ungrib.exe']],
'dirs': [],
}
super(EB_WPS, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Make sure PATH and LD_LIBRARY_PATH are set correctly."""
return {
'PATH': [self.wps_subdir],
'LD_LIBRARY_PATH': [self.wps_subdir],
'MANPATH': [],
}
def make_module_extra(self):
"""Add netCDF environment variables to module file."""
txt = super(EB_WPS, self).make_module_extra()
for var in ['NETCDF', 'NETCDFF']:
# check whether value is defined for compatibility with --module-only
if os.getenv(var) is not None:
txt += self.module_generator.set_environment(var, os.getenv(var))
return txt
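# Sketch of the custom easyconfig parameters consumed by this easyblock (the
# values shown are hypothetical, not taken from a real easyconfig):
#
#   name = 'WPS'
#   version = '4.1'
#   buildtype = 'dmpar'   # mandatory: 'smpar' (OpenMP) or 'dmpar' (MPI)
#   runtest = False       # skip the large testdata download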
|
pescobar/easybuild-easyblocks
|
easybuild/easyblocks/w/wps.py
|
Python
|
gpl-2.0
| 16,404
|
[
"NetCDF"
] |
88e618ab10ee94aa762bfb6dcf48b1328591f0a95001cdb7e20fa1692867d696
|
#!/usr/bin/env python
"""Combine_classifications.py: fills in taxonomic classification from separate tsv.
From tab-separated blast files corresponding to fwd and rev reads,
the 5% top HSP making cutoffs for evalue, coverage and identity
in both files are used for LCA determination"""
from collections import defaultdict
import argparse
import csv
import re
__author__ = "Yue O Hu and Luisa W Hugerth"
__email__ = "luisa.hugerth@scilifelab.se"
def parsetax(taxdict, leveldict, taxfile, level):
with open(taxfile) as csvfile:
reader = csv.reader(csvfile, delimiter="\t")
for row in reader:
query = row[0]
tax = row[1]
if query not in taxdict or taxdict[query] == "Unclassified":
taxdict[query] = tax
leveldict[query] = level
return taxdict, leveldict
def main(infiles, names):
filelist = infiles.split(",")
namelist = names.split(",")
count = 0
taxdict = dict()
leveldict = dict()
for infile in filelist:
taxdict, leveldict = parsetax(taxdict, leveldict, infile, namelist[count])
count += 1
for query, tax in taxdict.iteritems():
print query + "\t" + leveldict[query] + "\t" + tax
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Combines taxonomic assignment at different stringency levels')
    parser.add_argument('-i', '--infiles', help='Paths to relevant tsv taxonomies (outputs of taxonomy_blast_parser.py) in priority order, separated by ","')
    parser.add_argument('-n', '--names', help='Short names for the taxonomies, in the same order, separated by ","')
args = parser.parse_args()
main(args.infiles, args.names)
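# Example invocation (hypothetical file names):
#   python combine_taxonomy.py -i strict.tsv,relaxed.tsv -n strict,relaxed
# prints one line per query: "<query>\t<level name>\t<taxonomy>".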
|
EnvGen/toolbox
|
scripts/combine_taxonomy.py
|
Python
|
mit
| 1,614
|
[
"BLAST"
] |
5dd4bb68c90195f92f1cd4fe9c3cec33365659fc9e8afbd119f0fc2ca46f4cc5
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import ajax_select.fields
import six
from django import forms
from django.conf import settings
from django.contrib.admin.widgets import AdminDateWidget
from django.contrib.auth.forms import SetPasswordForm as BaseSetPasswordForm
from karaage.common import get_current_person
from karaage.common.constants import COUNTRIES
from karaage.common.forms import (
clean_email,
validate_password,
validate_phone_number,
)
from karaage.institutes.models import Institute
from karaage.people.models import Group, Person
from karaage.people.utils import (
UsernameException,
validate_username_for_new_person,
)
from karaage.projects.models import Project
from karaage.projects.utils import add_user_to_project
class PersonForm(forms.ModelForm):
# title = forms.ChoiceField(choices=TITLES, required=False)
# position = forms.CharField(required=False)
# email = forms.EmailField()
# department = forms.CharField(required=False)
# supervisor = forms.CharField(required=False)
telephone = forms.CharField(
required=True,
label=six.u("Office Telephone"),
help_text=six.u(
"Used for emergency contact and password reset service."),
validators=[validate_phone_number],
)
mobile = forms.CharField(
required=False,
validators=[validate_phone_number],
)
fax = forms.CharField(required=False, validators=[validate_phone_number])
address = forms.CharField(
label=six.u("Mailing Address"),
required=False,
widget=forms.Textarea())
country = forms.ChoiceField(
choices=COUNTRIES, initial='AU', required=False)
def __init__(self, *args, **kwargs):
super(PersonForm, self).__init__(*args, **kwargs)
self.fields['short_name'].help_text = \
"This is typically the person's given name. "\
"For example enter 'Fred' here."
self.fields['full_name'].help_text = \
"This is typically the person's full name. " \
"For example enter 'Fred Smith' here."
def clean(self):
data = super(PersonForm, self).clean()
for key in [
'short_name', 'full_name', 'email', 'position',
'supervisor', 'department', 'telephone', 'mobile', 'fax',
'address', ]:
if key in data and data[key]:
data[key] = data[key].strip()
return data
class Meta:
model = Person
fields = [
'short_name', 'full_name', 'email', 'title', 'position',
'supervisor', 'department', 'telephone', 'mobile', 'fax',
'address', 'country'
]
def clean_email(self):
email = self.cleaned_data['email']
users = Person.objects.filter(email__exact=email)
if self.instance:
users = users.exclude(pk=self.instance.pk)
if users.count() > 0:
raise forms.ValidationError(
six.u(
'An account with this email already exists. '
'Please email %s')
% settings.ACCOUNTS_EMAIL)
clean_email(email)
return email
class AdminPersonForm(PersonForm):
institute = forms.ModelChoiceField(queryset=None)
comment = forms.CharField(widget=forms.Textarea(), required=False)
expires = forms.DateField(widget=AdminDateWidget, required=False)
is_admin = forms.BooleanField(
help_text="Designates whether the user can log into this admin site.",
required=False)
is_systemuser = forms.BooleanField(
help_text="Designates that this user is a system process, "
"not a person.",
required=False)
def __init__(self, *args, **kwargs):
super(AdminPersonForm, self).__init__(*args, **kwargs)
self.fields['institute'].queryset = Institute.active.all()
class Meta:
model = Person
fields = [
'short_name', 'full_name', 'email', 'title', 'position',
'supervisor', 'department', 'institute', 'telephone', 'mobile',
'fax', 'address', 'country', 'expires', 'comment',
'is_systemuser', 'is_admin', ]
class AddPersonForm(AdminPersonForm):
project = forms.ModelChoiceField(
queryset=None,
label=six.u("Default Project"), required=False)
    needs_account = forms.BooleanField(
        required=False, label=six.u("Do you require a cluster account?"),
        help_text=six.u("e.g. will you be working on the project yourself"))
username = forms.CharField(
label=six.u("Requested username"),
max_length=settings.USERNAME_MAX_LENGTH,
help_text=(settings.USERNAME_VALIDATION_ERROR_MSG
+ " and has a max length of %s."
% settings.USERNAME_MAX_LENGTH))
password1 = forms.CharField(
widget=forms.PasswordInput(render_value=False),
label=six.u('Password'))
password2 = forms.CharField(
widget=forms.PasswordInput(render_value=False),
label=six.u('Password (again)'))
def __init__(self, *args, **kwargs):
super(AddPersonForm, self).__init__(*args, **kwargs)
self.fields['project'].queryset = Project.objects.all()
def clean_username(self):
username = self.cleaned_data['username']
try:
validate_username_for_new_person(username)
except UsernameException as e:
raise forms.ValidationError(e.args[0])
return username
def clean_password2(self):
username = self.cleaned_data.get('username')
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
return validate_password(username, password1, password2)
def save(self, commit=True):
assert commit is True
data = self.cleaned_data
person = super(AddPersonForm, self).save(commit=False)
person.username = data['username']
person.is_admin = data['is_admin']
person.is_active = True
person.approved_by = get_current_person()
person.set_password(data['password2'])
person.save()
if data['needs_account'] and data['project']:
add_user_to_project(person, data['project'])
return person
class AdminPasswordChangeForm(forms.Form):
new1 = forms.CharField(
widget=forms.PasswordInput(),
label=six.u('New Password'))
new2 = forms.CharField(
widget=forms.PasswordInput(),
label=six.u('New Password (again)'))
def __init__(self, person, *args, **kwargs):
self.person = person
super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)
def clean_new2(self):
username = self.person.username
password1 = self.cleaned_data.get('new1')
password2 = self.cleaned_data.get('new2')
return validate_password(username, password1, password2)
def save(self):
data = self.cleaned_data
person = self.person
person.set_password(data['new1'])
person.save()
class PasswordChangeForm(AdminPasswordChangeForm):
old = forms.CharField(widget=forms.PasswordInput(), label='Old password')
def clean_new2(self):
username = self.person.username
password1 = self.cleaned_data.get('new1')
password2 = self.cleaned_data.get('new2')
old_password = self.cleaned_data.get('old', None)
return validate_password(username, password1, password2, old_password)
def clean_old(self):
person = Person.objects.authenticate(
username=self.person.username,
password=self.cleaned_data['old'])
if person is None:
raise forms.ValidationError(
six.u('Your old password was incorrect'))
return self.cleaned_data['old']
class SetPasswordForm(BaseSetPasswordForm):
def clean_new_password1(self):
password1 = self.cleaned_data.get('new_password1')
return validate_password(self.user.username, password1)
class AdminGroupForm(forms.Form):
name = forms.RegexField(
"^%s$" % settings.GROUP_VALIDATION_RE,
required=True,
error_messages={'invalid': settings.GROUP_VALIDATION_ERROR_MSG})
description = forms.CharField()
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance', None)
super(AdminGroupForm, self).__init__(*args, **kwargs)
if self.instance is not None:
self.initial = self.instance.__dict__
def clean_name(self):
name = self.cleaned_data["name"]
groups = Group.objects.filter(name=name)
if self.instance is not None:
groups = groups.exclude(pk=self.instance.pk)
if groups.count() > 0:
raise forms.ValidationError("That group name already exists.")
return name
def save(self, group=None):
data = self.cleaned_data
if self.instance is None:
group = Group()
else:
group = self.instance
group.name = data['name']
group.description = data['description']
group.save()
return group
class AddGroupMemberForm(forms.Form):
""" Add a user to a group form """
person = ajax_select.fields.AutoCompleteSelectField(
'person', required=True, label="Add person")
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance', None)
super(AddGroupMemberForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
person = self.cleaned_data['person']
self.instance.add_person(person)
return self.instance
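# Illustrative sketch (hypothetical view code, not part of Karaage) of driving
# AddPersonForm from a Django view:
#
#   form = AddPersonForm(request.POST)
#   if form.is_valid():
#       person = form.save()  # creates the Person and joins the default project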
|
brianmay/karaage
|
karaage/people/forms.py
|
Python
|
gpl-3.0
| 10,433
|
[
"Brian"
] |
2247f2f8da33313825d793ebb0ac4804145b7f219b90231e590a3869849ef98a
|
'''
Module of learners used to determine what parameters to try next given previous cost evaluations.
Each learner is created and controlled by a controller.
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import threading
import numpy as np
import random
import numpy.random as nr
import scipy.optimize as so
import logging
import datetime
import os
import mloop.utilities as mlu
import multiprocessing as mp
import sklearn.gaussian_process as skg
import sklearn.gaussian_process.kernels as skk
import sklearn.preprocessing as skp
from mloop import __version__
import mloop.neuralnet as mlnn
#Lazy import of scikit-learn and tensorflow
learner_thread_count = 0
default_learner_archive_filename = 'learner_archive'
default_learner_archive_file_type = 'txt'
class LearnerInterrupt(Exception):
'''
Exception that is raised when the learner is ended with the end flag or event.
'''
def __init__(self):
'''
Create LearnerInterrupt.
'''
super(LearnerInterrupt,self).__init__()
class Learner():
'''
Base class for all learners. Contains default boundaries and some useful functions that all learners use.
    The class that inherits from this class should also inherit from threading.Thread or multiprocessing.Process, depending on whether you need the learner to be a genuine parallel process or not.
Keyword Args:
num_params (Optional [int]): The number of parameters to be optimized. If None defaults to 1. Default None.
min_boundary (Optional [array]): Array with minimum values allowed for each parameter. Note if certain values have no minimum value you can set them to -inf for example [-1, 2, float('-inf')] is a valid min_boundary. If None sets all the boundaries to '-1'. Default None.
max_boundary (Optional [array]): Array with maximum values allowed for each parameter. Note if certain values have no maximum value you can set them to +inf for example [0, float('inf'),3,-12] is a valid max_boundary. If None sets all the boundaries to '1'. Default None.
learner_archive_filename (Optional [string]): Name for python archive of the learners current state. If None, no archive is saved. Default None. But this is typically overloaded by the child class.
learner_archive_file_type (Optional [string]): File type for archive. Can be either 'txt' a human readable text file, 'pkl' a python dill file, 'mat' a matlab file or None if there is no archive. Default 'mat'.
log_level (Optional [int]): Level for the learners logger. If None, set to warning. Default None.
start_datetime (Optional [datetime]): Start date time, if None, is automatically generated.
param_names (Optional [list of str]): A list of names of the parameters for use e.g. in plot legends. Number of elements must equal num_params. If None, each name will be set to an empty sting. Default None.
Attributes:
params_out_queue (queue): Queue for parameters created by learner.
costs_in_queue (queue): Queue for costs to be used by learner.
end_event (event): Event to trigger end of learner.
all_params (array): Array containing all parameters sent to learner.
all_costs (array): Array containing all costs sent to learner.
all_uncers (array): Array containing all uncertainties sent to learner.
bad_run_indexs (list): list of indexes to all runs that were marked as
bad.
'''
def __init__(self,
num_params=None,
min_boundary=None,
max_boundary=None,
learner_archive_filename=default_learner_archive_filename,
learner_archive_file_type=default_learner_archive_file_type,
start_datetime=None,
param_names=None,
**kwargs):
super(Learner,self).__init__()
self._prepare_logger()
self.learner_wait=float(1)
self.remaining_kwargs = kwargs
self.params_out_queue = mp.Queue()
self.costs_in_queue = mp.Queue()
self.end_event = mp.Event()
if num_params is None:
self.log.warning('num_params not provided, setting to default value of 1.')
self.num_params = 1
else:
self.num_params = int(num_params)
if self.num_params <= 0:
msg = 'Number of parameters must be greater than zero:' + repr(self.num_params)
self.log.error(msg)
raise ValueError(msg)
if min_boundary is None:
self.min_boundary = np.full((self.num_params,), -1.0)
else:
self.min_boundary = np.array(min_boundary, dtype=np.float)
if self.min_boundary.shape != (self.num_params,):
msg = 'min_boundary array the wrong shape:' + repr(self.min_boundary.shape)
self.log.error(msg)
raise ValueError(msg)
if max_boundary is None:
self.max_boundary = np.full((self.num_params,), 1.0)
else:
self.max_boundary = np.array(max_boundary, dtype=np.float)
if self.max_boundary.shape != (self.num_params,):
            msg = 'max_boundary array the wrong shape:' + repr(self.max_boundary.shape)
self.log.error(msg)
raise ValueError(msg)
self.diff_boundary = self.max_boundary - self.min_boundary
if not np.all(self.diff_boundary>0.0):
msg = 'Not all elements of max_boundary are larger than min_boundary'
self.log.error(msg)
raise ValueError(msg)
if start_datetime is None:
self.start_datetime = datetime.datetime.now()
else:
self.start_datetime = start_datetime
if mlu.check_file_type_supported(learner_archive_file_type):
self.learner_archive_file_type = learner_archive_file_type
else:
            msg = 'File type is not supported:' + learner_archive_file_type
self.log.error(msg)
raise ValueError(msg)
if learner_archive_filename is None:
self.learner_archive_filename = None
self.learner_archive_dir = None
else:
# Store self.learner_archive_filename without any path, but include
# any path components in learner_archive_filename when constructing
# the full path.
learner_archive_filename = str(learner_archive_filename)
self.learner_archive_filename = os.path.basename(learner_archive_filename)
filename_suffix = mlu.generate_filename_suffix(
self.learner_archive_file_type,
file_datetime=self.start_datetime,
)
filename = learner_archive_filename + filename_suffix
self.total_archive_filename = os.path.join(mlu.archive_foldername, filename)
# Include any path info from learner_archive_filename when creating
# directory for archive files.
learner_archive_dir = os.path.dirname(self.total_archive_filename)
self.learner_archive_dir = learner_archive_dir
if not os.path.exists(learner_archive_dir):
os.makedirs(learner_archive_dir)
# Interpret/check param_names.
if param_names is None:
self.param_names = [''] * self.num_params
else:
self.param_names = param_names
# Ensure that there are the correct number of entries.
if len(self.param_names) != self.num_params:
msg = ('param_names has {n_names} elements but there are '
'{n_params} parameters.').format(
n_names=len(self.param_names),
n_params=self.num_params
)
self.log.error(msg)
raise ValueError(msg)
# Ensure that all of the entries are strings.
self.param_names = [str(name) for name in self.param_names]
#Storage variables, archived
self.all_params = np.array([], dtype=float)
self.all_costs = np.array([], dtype=float)
self.all_uncers = np.array([], dtype=float)
self.bad_run_indexs = []
self.archive_dict = {'mloop_version':__version__,
'archive_type':'learner',
'num_params':self.num_params,
'min_boundary':self.min_boundary,
'max_boundary':self.max_boundary,
'start_datetime':mlu.datetime_to_string(self.start_datetime),
'param_names':self.param_names}
self.log.debug('Learner init completed.')
def _prepare_logger(self):
'''
Prepare the logger.
If `self.log` already exists, then this method silently returns without
changing anything.
'''
if not hasattr(self, 'log'):
global learner_thread_count
learner_thread_count += 1
name = __name__ + '.' + str(learner_thread_count)
self.log = logging.getLogger(name)
def check_num_params(self,param):
'''
Check the number of parameters is right.
'''
return param.shape == (self.num_params,)
def check_in_boundary(self,param):
'''
Check given parameters are within stored boundaries.
Args:
param (array): array of parameters
Returns:
bool : True if the parameters are within boundaries, False otherwise.
'''
param = np.array(param)
testbool = np.all(param >= self.min_boundary) and np.all(param <= self.max_boundary)
return testbool
def check_in_diff_boundary(self,param):
'''
Check given distances are less than the boundaries.
Args:
param (array): array of distances
Returns:
bool : True if the distances are smaller or equal to boundaries, False otherwise.
'''
param = np.array(param)
testbool = np.all(param<=self.diff_boundary)
return testbool
def put_params_and_get_cost(self, params, **kwargs):
'''
Send parameters to queue and whatever additional keywords.
Also saves sent and received variables in appropriate storage arrays.
Args:
params (array) : array of values to be sent to file
Returns:
cost from the cost queue
'''
#self.log.debug('Learner params='+repr(params))
if not self.check_num_params(params):
msg = 'Incorrect number of parameters sent to queue. Params' + repr(params)
self.log.error(msg)
raise ValueError(msg)
if not self.check_in_boundary(params):
self.log.warning('Parameters sent to queue are not within boundaries. Params:' + repr(params))
#self.log.debug('Learner puts params.')
self.params_out_queue.put(params)
#self.log.debug('Learner waiting for costs.')
self.save_archive()
while not self.end_event.is_set():
try:
message = self.costs_in_queue.get(True, self.learner_wait)
except mlu.empty_exception:
continue
else:
break
else:
self.log.debug('Learner end signal received. Ending')
            # Check for one more message which may have been lost in a race
            # with the end_event being set.
try:
message = self.costs_in_queue.get(True, self.learner_wait)
except mlu.empty_exception:
pass
else:
params, cost, uncer, bad = self._parse_cost_message(message)
self._update_run_data_attributes(params, cost, uncer, bad)
raise LearnerInterrupt
#self.log.debug('Learner cost='+repr(cost))
# Record values.
params, cost, uncer, bad = self._parse_cost_message(message)
self._update_run_data_attributes(params, cost, uncer, bad)
return cost
def _parse_cost_message(self, message):
'''
Parse a message sent from the controller via `self.costs_in_queue`.
Args:
message (tuple): A tuple put in `self.costs_in_queue` by the
controller. It should be of the form
`(params, cost, uncer, bad)` where `params` is an array
specifying the parameter values used, `cost` is the measured
cost for those parameter values, `uncer` is the uncertainty
measured for those parameter values, and `bad` is a boolean
indicating whether the run was bad.
Raises:
ValueError: A `ValueError` is raised if the number of parameters in
the provided `params` doesn't match `self.num_params`.
Returns:
tuple: A tuple of the form `(params, cost, uncer, bad)`. For more
information on the meaning of those parameters, see the entry
for the `message` argument above.
'''
params, cost, uncer, bad = message
params = np.array(params, dtype=float)
if not self.check_num_params(params):
msg = ('Expected {num_params} parameters, but parameters were: '
'{params}.').format(
num_params=self.num_params,
params=repr(params),
)
self.log.error(msg)
raise ValueError(msg)
if not self.check_in_boundary(params):
self.log.warning('Parameters provided to learner not in boundaries:' + repr(params))
cost = float(cost)
uncer = float(uncer)
if uncer < 0:
            self.log.error('Provided uncertainty must be larger than or equal to zero:' + repr(uncer))
return params, cost, uncer, bad
def _update_run_data_attributes(self, params, cost, uncer, bad):
'''
Update attributes that store the results returned by the controller.
Args:
params (array): Array of control parameter values.
cost (float): The cost measured for `params`.
uncer (float): The uncertainty measured for `params`.
bad (bool): Whether or not the run was bad.
'''
if self.all_params.size==0:
self.all_params = np.array([params], dtype=float)
self.all_costs = np.array([cost], dtype=float)
self.all_uncers = np.array([uncer], dtype=float)
else:
# params
params_array = np.array([params], dtype=float)
self.all_params = np.append(self.all_params, params_array, axis=0)
# cost
cost_array = np.array([cost], dtype=float)
self.all_costs = np.append(self.all_costs, cost_array, axis=0)
# uncer
uncer_array = np.array([uncer], dtype=float)
self.all_uncers = np.append(self.all_uncers, uncer_array, axis=0)
if bad:
cost_index = len(self.all_costs) - 1
self.bad_run_indexs.append(cost_index)
def save_archive(self):
'''
Save the archive associated with the learner class. Only occurs if the filename for the archive is not None. Saves with the format previously set.
'''
self.update_archive()
if self.learner_archive_filename is not None:
mlu.save_dict_to_file(self.archive_dict, self.total_archive_filename, self.learner_archive_file_type)
def update_archive(self):
'''
Update the dictionary of parameters and values to save to the archive.
Child classes should call this method and also updated
`self.archive_dict` with any other parameters and values that need to be
saved to the learner archive.
'''
new_values_dict = {
'all_params':self.all_params,
'all_costs':self.all_costs,
'all_uncers':self.all_uncers,
'bad_run_indexs':self.bad_run_indexs,
}
self.archive_dict.update(new_values_dict)
def _set_trust_region(self,trust_region):
'''
Sets trust region properties for learner that have this. Common function for learners with trust regions.
Args:
trust_region (float or array): Property defines the trust region.
'''
if trust_region is None:
self.trust_region = float('nan')
self.has_trust_region = False
else:
self.has_trust_region = True
if isinstance(trust_region , float):
if trust_region > 0 and trust_region < 1:
self.trust_region = trust_region * self.diff_boundary
else:
msg = 'Trust region, when a float, must be between 0 and 1: '+repr(trust_region)
self.log.error(msg)
raise ValueError(msg)
else:
self.trust_region = np.array(trust_region, dtype=float)
if self.has_trust_region:
if not self.check_num_params(self.trust_region):
msg = 'Shape of the trust_region does not match the number of parameters:' + repr(self.trust_region)
self.log.error(msg)
raise ValueError(msg)
if not np.all(self.trust_region>0):
msg = 'All trust_region values must be positive:' + repr(self.trust_region)
self.log.error(msg)
raise ValueError(msg)
if not self.check_in_diff_boundary(self.trust_region):
msg = 'The trust_region must be smaller than the range of the boundaries:' + repr(self.trust_region)
self.log.error(msg)
raise ValueError(msg)
def _shut_down(self):
'''
Shut down and perform one final save of learner.
'''
self.log.debug('Performing shut down of learner.')
self.save_archive()
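# Illustrative sketch (assumed values) of the message format the controller
# places on `costs_in_queue`, as parsed by Learner._parse_cost_message() above:
#
#   params = np.array([0.2, -0.5])        # parameters that were tested
#   message = (params, 1.3, 0.1, False)   # (params, cost, uncer, bad)
#   learner.costs_in_queue.put(message)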
class RandomLearner(Learner, threading.Thread):
'''
Random learner. Simply generates new parameters randomly with a uniform distribution over the boundaries. Learner is perhaps a misnomer for this class.
Args:
**kwargs (Optional dict): Other values to be passed to Learner.
Keyword Args:
        min_boundary (Optional [array]): If set to None, overrides the default learner values and sets all minimum values to 0. Default None.
        max_boundary (Optional [array]): If set to None, overrides the default learner values and sets all maximum values to 1. Default None.
first_params (Optional [array]): The first parameters to test. If None will just randomly sample the initial condition.
trust_region (Optional [float or array]): The trust region defines the maximum distance the learner will travel from the current best set of parameters. If None, the learner will search everywhere. If a float, this number must be between 0 and 1 and defines maximum distance the learner will venture as a percentage of the boundaries. If it is an array, it must have the same size as the number of parameters and the numbers define the maximum absolute distance that can be moved along each direction.
'''
def __init__(self,
trust_region=None,
first_params=None,
**kwargs):
super(RandomLearner,self).__init__(**kwargs)
if not (np.all(np.isfinite(self.min_boundary)) and np.all(np.isfinite(self.max_boundary))):
msg = 'Minimum and/or maximum boundaries are NaN or inf. Must both be finite for random learner. Min boundary:' + repr(self.min_boundary) +'. Max boundary:' + repr(self.max_boundary)
self.log.error(msg)
raise ValueError(msg)
if first_params is None:
self.first_params = None
else:
self.first_params = np.array(first_params, dtype=float)
if not self.check_num_params(self.first_params):
msg = 'first_params has the wrong number of parameters:' + repr(self.first_params)
self.log.error(msg)
raise ValueError(msg)
if not self.check_in_boundary(self.first_params):
msg = 'first_params is not in the boundary:' + repr(self.first_params)
self.log.error(msg)
raise ValueError(msg)
# Keep track of best parameters to implement trust region.
self.best_cost = None
self.best_params = None
self._set_trust_region(trust_region)
new_values_dict = {
'archive_type': 'random_learner',
'trust_region': self.trust_region,
'has_trust_region': self.has_trust_region,
}
self.archive_dict.update(new_values_dict)
self.log.debug('Random learner init completed.')
def run(self):
'''
Puts the next parameters on the queue; they are picked randomly from a uniform distribution between the minimum and maximum boundaries whenever a cost is added to the cost queue.
'''
self.log.debug('Starting Random Learner')
if self.first_params is None:
next_params = self.min_boundary + nr.rand(self.num_params) * self.diff_boundary
else:
next_params = self.first_params
while not self.end_event.is_set():
try:
cost = self.put_params_and_get_cost(next_params)
except LearnerInterrupt:
break
else:
# Update best parameters if necessary.
if self.best_cost is None or cost < self.best_cost:
self.best_cost = cost
self.best_params = self.all_params[-1]
if self.has_trust_region:
temp_min = np.maximum(self.min_boundary, self.best_params - self.trust_region)
temp_max = np.minimum(self.max_boundary, self.best_params + self.trust_region)
next_params = temp_min + nr.rand(self.num_params) * (temp_max - temp_min)
else:
next_params = self.min_boundary + nr.rand(self.num_params) * self.diff_boundary
self._shut_down()
self.log.debug('Ended Random Learner')
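# A hedged usage sketch for RandomLearner. The queue/event wiring below is
# illustrative: in normal use the controller constructs and passes these
# objects, so the exact setup here is an assumption, not the documented API.
#
#   import queue, threading
#   params_out = queue.Queue()
#   costs_in = queue.Queue()
#   end = threading.Event()
#   learner = RandomLearner(num_params=2,
#                           min_boundary=[0., 0.], max_boundary=[1., 1.],
#                           params_out_queue=params_out,
#                           costs_in_queue=costs_in, end_event=end)
#   learner.start()  # threading.Thread entry point; executes run()
#   # The experiment side would now take parameters from params_out, measure
#   # a cost, and put it on costs_in until end.set() is called.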
class NelderMeadLearner(Learner, threading.Thread):
'''
Nelder–Mead learner. Executes the Nelder–Mead algorithm and stores the simplex needed to estimate the next points.
Args:
params_out_queue (queue): Queue for parameters from controller.
costs_in_queue (queue): Queue for costs for nelder learner. The queue should be populated with cost (float) corresponding to the last parameter sent from the Nelder–Mead Learner. Can be a float('inf') if it was a bad run.
end_event (event): Event to trigger end of learner.
Keyword Args:
initial_simplex_corner (Optional [array]): Array for the initial set of parameters, which is the lowest corner of the initial simplex. If None the initial parameters are randomly sampled if the boundary conditions are provided, or all are set to 0 if boundary conditions are not provided.
initial_simplex_displacements (Optional [array]): Array used to construct the initial simplex. Each element is the positive displacement of the corresponding parameter above the initial simplex corner. If None and there are no boundary conditions, all are set to 1. If None and there are boundary conditions, assumes the initial conditions are scaled. Default None.
initial_simplex_scale (Optional [float]): Creates a simplex using the boundary conditions and the scaling factor provided. If None, uses initial_simplex_displacements if provided. If both are None but boundary conditions are given, the scale is set to 0.5. Default None.
Attributes:
init_simplex_corner (array): Parameters for the corner of the initial simplex used.
init_simplex_disp (array): Parameters for the displacements about the simplex corner used to create the initial simplex.
simplex_params (array): Parameters of the current simplex
simplex_costs (array): Costs associated with the parameters of the current simplex
'''
def __init__(self,
initial_simplex_corner=None,
initial_simplex_displacements=None,
initial_simplex_scale=None,
**kwargs):
super(NelderMeadLearner,self).__init__(**kwargs)
self.num_boundary_hits = 0
self.rho = 1
self.chi = 2
self.psi = 0.5
self.sigma = 0.5
if initial_simplex_displacements is None and initial_simplex_scale is None:
self.init_simplex_disp = self.diff_boundary * 0.6
self.init_simplex_disp[self.init_simplex_disp==float('inf')] = 1
elif initial_simplex_scale is not None:
initial_simplex_scale = float(initial_simplex_scale)
if initial_simplex_scale>1 or initial_simplex_scale<=0:
msg = 'initial_simplex_scale must be greater than 0 and no larger than 1'
self.log.error(msg)
raise ValueError(msg)
self.init_simplex_disp = self.diff_boundary * initial_simplex_scale
elif initial_simplex_displacements is not None:
self.init_simplex_disp = np.array(initial_simplex_displacements, dtype=float)
else:
msg = 'initial_simplex_displacements and initial_simplex_scale cannot both be provided simultaneously.'
self.log.error(msg)
raise ValueError(msg)
if not self.check_num_params(self.init_simplex_disp):
msg = 'There is the wrong number of elements in the initial simplex displacement:' + repr(self.init_simplex_disp)
self.log.error(msg)
raise ValueError(msg)
if np.any(self.init_simplex_disp<0):
msg = 'initial simplex displacements generated from configuration must all be positive'
self.log.error(msg)
raise ValueError(msg)
if not self.check_in_diff_boundary(self.init_simplex_disp):
msg = 'Initial simplex displacements must be within boundaries. init_simplex_disp:'+ repr(self.init_simplex_disp) + '. diff_boundary:' +repr(self.diff_boundary)
self.log.error(msg)
raise ValueError(msg)
if initial_simplex_corner is None:
diff_roll = (self.diff_boundary - self.init_simplex_disp) * nr.rand(self.num_params)
diff_roll[diff_roll==float('+inf')] = 0
# Copy so the in-place edits below do not mutate self.min_boundary.
self.init_simplex_corner = np.copy(self.min_boundary)
self.init_simplex_corner[self.init_simplex_corner==float('-inf')] = 0
self.init_simplex_corner += diff_roll
else:
self.init_simplex_corner = np.array(initial_simplex_corner, dtype=float)
if not self.check_num_params(self.init_simplex_corner):
msg = 'There is the wrong number of elements in the initial simplex corner:' + repr(self.init_simplex_corner)
self.log.error(msg)
raise ValueError(msg)
if not self.check_in_boundary(self.init_simplex_corner):
msg = 'Initial simplex corner outside of boundaries:' + repr(self.init_simplex_corner)
self.log.error(msg)
raise ValueError(msg)
if not np.all(np.isfinite(self.init_simplex_corner + self.init_simplex_disp)):
msg = 'Initial simplex corner and simplex are not finite numbers. init_simplex_corner:'+ repr(self.init_simplex_corner) + '. init_simplex_disp:' +repr(self.init_simplex_disp)
self.log.error(msg)
raise ValueError(msg)
if not self.check_in_boundary(self.init_simplex_corner + self.init_simplex_disp):
msg = 'Largest boundary of simplex not inside the boundaries:' + repr(self.init_simplex_corner + self.init_simplex_disp)
self.log.error(msg)
raise ValueError(msg)
self.simplex_params = np.zeros((self.num_params + 1, self.num_params), dtype=float)
self.simplex_costs = np.zeros((self.num_params + 1,), dtype=float)
self.archive_dict.update({'archive_type':'nelder_mead_learner',
'initial_simplex_corner':self.init_simplex_corner,
'initial_simplex_displacements':self.init_simplex_disp})
self.log.debug('Nelder–Mead learner init completed.')
def run(self):
'''
Runs Nelder–Mead algorithm to produce new parameters given costs, until end signal is given.
'''
self.log.info('Starting Nelder–Mead Learner')
N = int(self.num_params)
one2np1 = list(range(1, N + 1))
self.simplex_params[0] = self.init_simplex_corner
try:
self.simplex_costs[0] = self.put_params_and_get_cost(self.init_simplex_corner)
except ValueError:
self.log.error('Outside of boundary on initial condition. THIS SHOULD NOT HAPPEN')
raise
except LearnerInterrupt:
self.log.info('Ended Nelder–Mead before end of simplex')
return
for k in range(0, N):
y = np.array(self.init_simplex_corner, copy=True)
y[k] = y[k] + self.init_simplex_disp[k]
self.simplex_params[k + 1] = y
try:
f = self.put_params_and_get_cost(y)
except ValueError:
self.log.error('Outside of boundary on initial condition. THIS SHOULD NOT HAPPEN')
raise
except LearnerInterrupt:
self.log.info('Ended Nelder–Mead before end of simplex')
return
self.simplex_costs[k + 1] = f
ind = np.argsort(self.simplex_costs)
self.simplex_costs = np.take(self.simplex_costs, ind, 0)
# sort so sim[0,:] has the lowest function value
self.simplex_params = np.take(self.simplex_params, ind, 0)
while not self.end_event.is_set():
xbar = np.add.reduce(self.simplex_params[:-1], 0) / N
xr = (1 + self.rho) * xbar - self.rho * self.simplex_params[-1]
if self.check_in_boundary(xr):
try:
fxr = self.put_params_and_get_cost(xr)
except ValueError:
self.log.error('Outside of boundary on first reduce. THIS SHOULD NOT HAPPEN')
raise
except LearnerInterrupt:
break
else:
#Hit boundary so set the cost to positive infinity to ensure reflection
fxr = float('inf')
self.num_boundary_hits += 1
self.log.debug('Hit boundary (reflect): ' + str(self.num_boundary_hits) + ' times.')
doshrink = 0
if fxr < self.simplex_costs[0]:
xe = (1 + self.rho * self.chi) * xbar - self.rho * self.chi * self.simplex_params[-1]
if self.check_in_boundary(xe):
try:
fxe = self.put_params_and_get_cost(xe)
except ValueError:
self.log.error('Outside of boundary when it should not be. THIS SHOULD NOT HAPPEN')
raise
except LearnerInterrupt:
break
else:
#Hit boundary so set the cost above the reflected cost; this ensures the algorithm does a contracting reflection
fxe = fxr + 1.0
self.num_boundary_hits += 1
self.log.debug('Hit boundary (expand): ' + str(self.num_boundary_hits) + ' times.')
if fxe < fxr:
self.simplex_params[-1] = xe
self.simplex_costs[-1] = fxe
else:
self.simplex_params[-1] = xr
self.simplex_costs[-1] = fxr
else: # fsim[0] <= fxr
if fxr < self.simplex_costs[-2]:
self.simplex_params[-1] = xr
self.simplex_costs[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < self.simplex_costs[-1]:
xc = (1 + self.psi * self.rho) * xbar - self.psi * self.rho * self.simplex_params[-1]
try:
fxc = self.put_params_and_get_cost(xc)
except ValueError:
self.log.error('Outside of boundary on contraction: THIS SHOULD NOT HAPPEN')
raise
except LearnerInterrupt:
break
if fxc <= fxr:
self.simplex_params[-1] = xc
self.simplex_costs[-1] = fxc
else:
doshrink = 1
else:
# Perform an inside contraction
xcc = (1 - self.psi) * xbar + self.psi * self.simplex_params[-1]
try:
fxcc = self.put_params_and_get_cost(xcc)
except ValueError:
self.log.error('Outside of boundary on inside contraction: THIS SHOULD NOT HAPPEN')
raise
except LearnerInterrupt:
break
if fxcc < self.simplex_costs[-1]:
self.simplex_params[-1] = xcc
self.simplex_costs[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
self.simplex_params[j] = self.simplex_params[0] + self.sigma * (self.simplex_params[j] - self.simplex_params[0])
try:
self.simplex_costs[j] = self.put_params_and_get_cost(self.simplex_params[j])
except ValueError:
self.log.error('Outside of boundary on shrink contraction: THIS SHOULD NOT HAPPEN')
raise
except LearnerInterrupt:
break
ind = np.argsort(self.simplex_costs)
self.simplex_params = np.take(self.simplex_params, ind, 0)
self.simplex_costs = np.take(self.simplex_costs, ind, 0)
self._shut_down()
self.log.info('Ended Nelder–Mead')
def update_archive(self):
'''
Update the archive.
'''
super(NelderMeadLearner, self).update_archive()
new_values_dict = {
'simplex_parameters':self.simplex_params,
'simplex_costs':self.simplex_costs,
}
self.archive_dict.update(new_values_dict)
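# For reference, the update rules implemented in run() above are the standard
# Nelder–Mead moves with coefficients rho=1 (reflect), chi=2 (expand),
# psi=0.5 (contract) and sigma=0.5 (shrink). A one-dimensional illustration
# with hypothetical numbers, not taken from the source:
#
#   # simplex_params = [2.0, 6.0] with costs [1.0, 9.0]; xbar = 2.0
#   # reflect:          xr  = (1 + 1) * 2.0 - 1 * 6.0       = -2.0
#   # expand:           xe  = (1 + 1*2) * 2.0 - 1*2 * 6.0   = -6.0
#   # outside contract: xc  = (1 + 0.5) * 2.0 - 0.5 * 6.0   =  0.0
#   # inside contract:  xcc = (1 - 0.5) * 2.0 + 0.5 * 6.0   =  4.0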
class DifferentialEvolutionLearner(Learner, threading.Thread):
'''
Adaptation of the differential evolution algorithm in scipy.
Args:
params_out_queue (queue): Queue for parameters sent to controller.
costs_in_queue (queue): Queue for costs for the differential evolution learner. Each entry must be a tuple.
end_event (event): Event to trigger end of learner.
Keyword Args:
first_params (Optional [array]): The first parameters to test. If None will just randomly sample the initial condition. Default None.
trust_region (Optional [float or array]): The trust region defines the maximum distance the learner will travel from the current best set of parameters. If None, the learner will search everywhere. If a float, this number must be between 0 and 1 and defines maximum distance the learner will venture as a percentage of the boundaries. If it is an array, it must have the same size as the number of parameters and the numbers define the maximum absolute distance that can be moved along each direction.
evolution_strategy (Optional [string]): the differential evolution strategy to use, options are 'best1', 'best2', 'rand1' and 'rand2'. The default is 'best1'.
population_size (Optional [int]): A multiplier used to set the number of parameter sets in a generation. The generation population is set to population_size * parameter_num. Default 15.
mutation_scale (Optional [tuple]): The mutation scale when picking new points. Otherwise known as differential weight. When provided as a tuple (min,max) a mutation constant is picked randomly in the interval. Default (0.5,1.0).
cross_over_probability (Optional [float]): The recombination constant or crossover probability, the probability a new point will be added to the population. Default 0.7.
restart_tolerance (Optional [float]): When the current population has a spread less than the initial tolerance, namely stdev(curr_pop) < restart_tolerance * stdev(init_pop), it is likely the population is now in a minimum, and so the search is started again. Default 0.01.
Attributes:
has_trust_region (bool): Whether the learner has a trust region.
num_population_members (int): The number of parameters in a generation.
params_generations (list): History of the parameters generations. A list of all the parameters in the population, for each generation created.
costs_generations (list): History of the costs generations. A list of all the costs in the population, for each generation created.
init_std (float): The initial standard deviation in costs of the population. Calculated after sampling (or resampling) the initial population.
curr_std (float): The current standard deviation in costs of the population. Calculated after sampling each generation.
'''
def __init__(self,
first_params = None,
trust_region = None,
evolution_strategy='best1',
population_size=15,
mutation_scale=(0.5, 1),
cross_over_probability=0.7,
restart_tolerance=0.01,
**kwargs):
super(DifferentialEvolutionLearner,self).__init__(**kwargs)
if first_params is None:
self.first_params = float('nan')
else:
self.first_params = np.array(first_params, dtype=float)
if not self.check_num_params(self.first_params):
msg = 'first_params has the wrong number of parameters:' + repr(self.first_params)
self.log.error(msg)
raise ValueError(msg)
if not self.check_in_boundary(self.first_params):
msg = 'first_params is not in the boundary:' + repr(self.first_params)
self.log.error(msg)
raise ValueError(msg)
self._set_trust_region(trust_region)
if evolution_strategy == 'best1':
self.mutation_func = self._best1
elif evolution_strategy == 'best2':
self.mutation_func = self._best2
elif evolution_strategy == 'rand1':
self.mutation_func = self._rand1
elif evolution_strategy == 'rand2':
self.mutation_func = self._rand2
else:
msg = 'Please select a valid mutation strategy'
self.log.error(msg)
raise ValueError(msg)
self.evolution_strategy = evolution_strategy
self.restart_tolerance = restart_tolerance
if len(mutation_scale) == 2 and np.all(np.array(mutation_scale) <= 2) and np.all(np.array(mutation_scale) > 0):
self.mutation_scale = mutation_scale
else:
msg = 'Mutation scale must be a tuple with (min,max) between 0 and 2. mutation_scale:' + repr(mutation_scale)
self.log.error(msg)
raise ValueError(msg)
if cross_over_probability <= 1 and cross_over_probability >= 0:
self.cross_over_probability = cross_over_probability
else:
msg = 'Cross over probability must be between 0 and 1. cross_over_probability:' + repr(cross_over_probability)
self.log.error(msg)
raise ValueError(msg)
if population_size >= 5:
self.population_size = population_size
else:
msg = 'Population size must be greater or equal to 5:' + repr(population_size)
self.log.error(msg)
raise ValueError(msg)
self.num_population_members = self.population_size * self.num_params
self.first_sample = True
self.params_generations = []
self.costs_generations = []
self.generation_count = 0
self.min_index = 0
self.init_std = 0
self.curr_std = 0
self.archive_dict.update({'archive_type':'differential_evolution',
'evolution_strategy':self.evolution_strategy,
'mutation_scale':self.mutation_scale,
'cross_over_probability':self.cross_over_probability,
'population_size':self.population_size,
'num_population_members':self.num_population_members,
'restart_tolerance':self.restart_tolerance,
'first_params':self.first_params,
'has_trust_region':self.has_trust_region,
'trust_region':self.trust_region})
def run(self):
'''
Runs the Differential Evolution Learner.
'''
try:
self.generate_population()
while not self.end_event.is_set():
self.next_generation()
if self.curr_std < self.restart_tolerance * self.init_std:
self.generate_population()
except LearnerInterrupt:
return
def save_generation(self):
'''
Save history of generations.
'''
self.params_generations.append(np.copy(self.population))
self.costs_generations.append(np.copy(self.population_costs))
self.generation_count += 1
def generate_population(self):
'''
Sample a new random set of variables
'''
self.population = []
self.population_costs = []
self.min_index = 0
if np.all(np.isfinite(self.first_params)) and self.first_sample:
curr_params = self.first_params
self.first_sample = False
else:
curr_params = self.min_boundary + nr.rand(self.num_params) * self.diff_boundary
curr_cost = self.put_params_and_get_cost(curr_params)
self.population.append(curr_params)
self.population_costs.append(curr_cost)
for index in range(1, self.num_population_members):
if self.has_trust_region:
temp_min = np.maximum(self.min_boundary,self.population[self.min_index] - self.trust_region)
temp_max = np.minimum(self.max_boundary,self.population[self.min_index] + self.trust_region)
curr_params = temp_min + nr.rand(self.num_params) * (temp_max - temp_min)
else:
curr_params = self.min_boundary + nr.rand(self.num_params) * self.diff_boundary
curr_cost = self.put_params_and_get_cost(curr_params)
self.population.append(curr_params)
self.population_costs.append(curr_cost)
if curr_cost < self.population_costs[self.min_index]:
self.min_index = index
self.population = np.array(self.population)
self.population_costs = np.array(self.population_costs)
self.init_std = np.std(self.population_costs)
self.curr_std = self.init_std
self.save_generation()
def next_generation(self):
'''
Evolve the population by a single generation
'''
self.curr_scale = nr.uniform(self.mutation_scale[0], self.mutation_scale[1])
for index in range(self.num_population_members):
curr_params = self.mutate(index)
curr_cost = self.put_params_and_get_cost(curr_params)
if curr_cost < self.population_costs[index]:
self.population[index] = curr_params
self.population_costs[index] = curr_cost
if curr_cost < self.population_costs[self.min_index]:
self.min_index = index
self.curr_std = np.std(self.population_costs)
self.save_generation()
def mutate(self, index):
'''
Mutate the parameters at index.
Args:
index (int): Index of the point to be mutated.
'''
fill_point = nr.randint(0, self.num_params)
candidate_params = self.mutation_func(index)
crossovers = nr.rand(self.num_params) < self.cross_over_probability
crossovers[fill_point] = True
mutated_params = np.where(crossovers, candidate_params, self.population[index])
if self.has_trust_region:
temp_min = np.maximum(self.min_boundary,self.population[self.min_index] - self.trust_region)
temp_max = np.minimum(self.max_boundary,self.population[self.min_index] + self.trust_region)
rand_params = temp_min + nr.rand(self.num_params) * (temp_max - temp_min)
else:
rand_params = self.min_boundary + nr.rand(self.num_params) * self.diff_boundary
projected_params = np.where(np.logical_or(mutated_params < self.min_boundary, mutated_params > self.max_boundary), rand_params, mutated_params)
return projected_params
def _best1(self, index):
'''
Use best parameters and two others to generate mutation.
Args:
index (int): Index of member to mutate.
'''
r0, r1 = self.random_index_sample(index, 2)
return (self.population[self.min_index] + self.curr_scale * (self.population[r0] - self.population[r1]))
def _rand1(self, index):
'''
Use three random parameters to generate mutation.
Args:
index (int): Index of member to mutate.
'''
r0, r1, r2 = self.random_index_sample(index, 3)
return (self.population[r0] + self.curr_scale * (self.population[r1] - self.population[r2]))
def _best2(self, index):
'''
Use best parameters and four others to generate mutation.
Args:
index (int): Index of member to mutate.
'''
r0, r1, r2, r3 = self.random_index_sample(index, 4)
return self.population[self.min_index] + self.curr_scale * (self.population[r0] + self.population[r1] - self.population[r2] - self.population[r3])
def _rand2(self, index):
'''
Use five random parameters to generate mutation.
Args:
index (int): Index of member to mutate.
'''
r0, r1, r2, r3, r4 = self.random_index_sample(index, 5)
return self.population[r0] + self.curr_scale * (self.population[r1] + self.population[r2] - self.population[r3] - self.population[r4])
def random_index_sample(self, index, num_picks):
'''
Randomly select num_picks indexes, excluding index.
Args:
index(int): The index that is not included
num_picks(int): The number of picks.
'''
rand_indexes = list(range(self.num_population_members))
rand_indexes.remove(index)
return random.sample(rand_indexes, num_picks)
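# A small worked example of the binomial crossover in mutate() above, with
# hypothetical numbers: suppose population[index] = [1., 2., 3.],
# candidate_params = [9., 8., 7.], crossovers = [False, True, False] and
# fill_point = 0 (forced to True), giving crossovers = [True, True, False].
# np.where() then yields mutated_params = [9., 8., 3.]. Any component that
# falls outside the boundaries is replaced by a fresh random draw rather
# than being clipped to the boundary.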
def update_archive(self):
'''
Update the archive.
'''
super(DifferentialEvolutionLearner, self).update_archive()
new_values_dict = {
'params_generations':self.params_generations,
'costs_generations':self.costs_generations,
'population':self.population,
'population_costs':self.population_costs,
'init_std':self.init_std,
'curr_std':self.curr_std,
'generation_count':self.generation_count,
}
self.archive_dict.update(new_values_dict)
class MachineLearner(Learner):
'''
A parent class for more specific machine learner classes.
This class is not intended to be used directly.
Keyword Args:
trust_region (Optional [float or array]): The trust region defines the
maximum distance the learner will travel from the current best set
of parameters. If None, the learner will search everywhere. If a
float, this number must be between 0 and 1 and defines maximum
distance the learner will venture as a percentage of the boundaries.
If it is an array, it must have the same size as the number of
parameters and the numbers define the maximum absolute distance that
can be moved along each direction.
default_bad_cost (Optional [float]): If a run is reported as bad and
`default_bad_cost` is provided, the cost for the bad run is set to
this default value. If `default_bad_cost` is `None`, then the cost for
bad runs is set to the worst cost received. Default `None`.
default_bad_uncertainty (Optional [float]): If a run is reported as bad
and `default_bad_uncertainty` is provided, the uncertainty for the
bad run is set to this default value. If `default_bad_uncertainty`
is `None`, then the uncertainty is set to a tenth of the best to
worst cost range. Default `None`.
minimum_uncertainty (Optional [float]): The minimum uncertainty
associated with provided costs. Must be above zero to avoid fitting
errors. Default `1e-8`.
predict_global_minima_at_end (Optional [bool]): If `True` finds the
global minima when the learner is ended. Does not if `False`.
Default `True`.
training_filename (Optional [str]): The name of a learner archive from a
previous optimization from which to extract past results for use in
the current optimization. If `None`, no past results will be used.
Default `None`.
training_file_type (Optional [str]): File type of the training archive.
Can be `'txt'`, `'pkl'`, `'mat'`, or `None`. If set to `None`, then
the file type will be determined automatically. This argument has no
effect if `training_filename` is set to `None`. Default `None`.
Attributes:
all_params (array): Array containing all parameters sent to learner.
all_costs (array): Array containing all costs sent to learner.
all_uncers (array): Array containing all uncertainties sent to learner.
scaled_costs (array): Array containing all the costs scaled to have zero mean and a standard deviation of 1. Needed for training the learner.
bad_run_indexs (list): list of indexes to all runs that were marked as bad.
best_cost (float): Minimum received cost, updated during execution.
best_params (array): Parameters of best run. (reference to element in params array).
best_index (int): index of the best cost and params.
worst_cost (float): Maximum received cost, updated during execution.
worst_index (int): index to run with worst cost.
cost_range (float): Difference between worst_cost and best_cost
params_count (int): Counter for the number of parameters asked to be evaluated by the learner.
has_trust_region (bool): Whether the learner has a trust region.
'''
def __init__(self,
trust_region=None,
default_bad_cost = None,
default_bad_uncertainty = None,
minimum_uncertainty = 1e-8,
predict_global_minima_at_end = True,
training_filename=None,
training_file_type=None,
**kwargs):
# Prepare logger now so that logging can be done before calling parent's
# __init__() method.
self._prepare_logger()
if training_filename is not None:
# Automatically determine training_file_type if necessary.
training_filename = str(training_filename)
if training_file_type is None:
training_file_type = mlu.get_file_type(training_filename)
training_file_type = str(training_file_type)
if not mlu.check_file_type_supported(training_file_type):
msg = 'Training file type not supported: ' + repr(training_file_type)
self.log.error(msg)
raise ValueError(msg)
self.training_file_dir = os.path.dirname(training_filename)
# Get the training dictionary.
training_dict = mlu.get_dict_from_file(
training_filename,
training_file_type,
)
self.training_dict = training_dict
# Parameters that must match the values in the training archive.
num_params = int(training_dict['num_params'])
kwargs['num_params'] = self._reconcile_kwarg_and_training_val(
kwargs,
'num_params',
num_params,
)
# Run parent's __init__() now so that it gets the updated value for
# num_params but its empty values for all_params, etc., get
# overwritten below.
super(MachineLearner, self).__init__(**kwargs)
# Data that must be present in any archive type.
self.all_params = np.array(training_dict['all_params'], dtype=float)
self.all_costs = mlu.safe_cast_to_array(training_dict['all_costs'])
self.all_uncers = mlu.safe_cast_to_array(training_dict['all_uncers'])
self.bad_run_indexs = mlu.safe_cast_to_list(training_dict['bad_run_indexs'])
# Learner archives from GaussianProcessLearner and NeuralNetLearner
# made with versions of M-LOOP <= 3.1.1 had a bug where
# bad_run_indexs was a list of lists. Flatten the list if it has
# that formatting.
if self.bad_run_indexs and isinstance(self.bad_run_indexs[0], list):
self.bad_run_indexs = [
index for sublist in self.bad_run_indexs for index in sublist
]
# Data that may be in the archive, but can easily be calculated if
# necessary.
# costs_count
costs_count = training_dict.get(
'costs_count',
len(self.all_costs),
)
self.costs_count = int(costs_count)
# best_index
best_index = training_dict.get(
'best_index',
np.argmin(self.all_costs),
)
self.best_index = int(best_index)
# best_cost
best_cost = training_dict.get(
'best_cost',
self.all_costs[self.best_index],
)
self.best_cost = float(best_cost)
# best_params
best_params = training_dict.get(
'best_params',
self.all_params[self.best_index],
)
self.best_params = mlu.safe_cast_to_array(best_params)
# worst_index
worst_index = training_dict.get(
'worst_index',
np.argmax(self.all_costs),
)
self.worst_index = int(worst_index)
# worst_cost
worst_cost = training_dict.get(
'worst_cost',
self.all_costs[self.worst_index],
)
self.worst_cost = float(worst_cost)
# cost_range
cost_range = training_dict.get(
'cost_range',
(self.worst_cost - self.best_cost),
)
self.cost_range = float(cost_range)
# Parameters that must be the same in keyword arguments and in the
# training archive in order to load some of the data.
# learner type
self._learner_type_matches_training_archive = True
learner_type_train = self.training_dict['archive_type']
if learner_type_train != self._ARCHIVE_TYPE:
self._learner_type_matches_training_archive = False
# min_boundary
self._boundaries_match_training_archive = True
min_boundary_train = self.training_dict['min_boundary']
min_boundary_train = mlu.safe_cast_to_array(min_boundary_train)
are_same = np.array_equal(
kwargs.get('min_boundary'),
min_boundary_train,
)
if not are_same:
self._boundaries_match_training_archive = False
# max_boundary
max_boundary_train = self.training_dict['max_boundary']
max_boundary_train = mlu.safe_cast_to_array(max_boundary_train)
are_same = np.array_equal(
kwargs.get('max_boundary'),
max_boundary_train,
)
if not are_same:
self._boundaries_match_training_archive = False
else:
super(MachineLearner, self).__init__(**kwargs)
self._learner_type_matches_training_archive = False
self._boundaries_match_training_archive = False
#Storage variables, archived
self.best_cost = float('inf')
self.best_params = float('nan')
self.best_index = 0
self.worst_cost = float('-inf')
self.worst_index = 0
self.cost_range = float('inf')
self.costs_count = 0
# Parameters that should only be loaded if a training archive was
# provided and it has the same learner type and min/max boundaries.
same_learner_type = self._learner_type_matches_training_archive
same_boundaries = self._boundaries_match_training_archive
if same_learner_type and same_boundaries:
training_dict = self.training_dict
# Counters
self.params_count = int(training_dict['params_count'])
# Predicted optimum
try:
self.predicted_best_parameters = mlu.safe_cast_to_array(
training_dict['predicted_best_parameters']
)
self.predicted_best_cost = float(
training_dict['predicted_best_cost']
)
self.predicted_best_uncertainty = float(
training_dict['predicted_best_uncertainty']
)
self.has_global_minima = True
except KeyError:
self.has_global_minima = False
else:
# Counters
self.params_count = 0
# Predicted optimum
self.has_global_minima = False
# Multiprocessor controls
self.new_params_event = mp.Event()
# Storage variables and counters
self.search_params = []
self.scaled_costs = None
# Constants, limits and tolerances
self.search_precision = 1.0e-6
self.parameter_searches = max(10, self.num_params)
self.bad_uncer_frac = 0.1 # Fraction of cost range to set a bad run uncertainty
# Optional user set variables
self._set_trust_region(trust_region)
self.predict_global_minima_at_end = bool(predict_global_minima_at_end)
self.minimum_uncertainty = float(minimum_uncertainty)
if default_bad_cost is not None:
self.default_bad_cost = float(default_bad_cost)
else:
self.default_bad_cost = None
if default_bad_uncertainty is not None:
self.default_bad_uncertainty = float(default_bad_uncertainty)
else:
self.default_bad_uncertainty = None
if (self.default_bad_cost is None) and (self.default_bad_uncertainty is None):
self.bad_defaults_set = False
elif (self.default_bad_cost is not None) and (self.default_bad_uncertainty is not None):
self.bad_defaults_set = True
else:
msg = 'Both the default cost and uncertainty must be set for a bad run or they must both be set to None.'
self.log.error(msg)
raise ValueError(msg)
if self.minimum_uncertainty <= 0:
msg = 'Minimum uncertainty must be larger than zero for the learner.'
self.log.error(msg)
raise ValueError(msg)
#Search bounds
self.search_min = self.min_boundary
self.search_max = self.max_boundary
self.search_diff = self.search_max - self.search_min
self.search_region = np.transpose([self.search_min, self.search_max])
# Update archive.
new_values_dict = {
'search_precision': self.search_precision,
'parameter_searches': self.parameter_searches,
'bad_uncer_frac': self.bad_uncer_frac,
'trust_region': self.trust_region,
'has_trust_region': self.has_trust_region,
'predict_global_minima_at_end': self.predict_global_minima_at_end,
}
self.archive_dict.update(new_values_dict)
def _reconcile_kwarg_and_training_val(self, kwargs_, name, training_value):
'''Utility function for comparing values from kwargs to training values.
When a training archive is specified there can be two values specified
for some parameters; one from user's config/kwargs and one from the
training archive. This function compares the values. If the values are
the same then the value is returned, and if they are different a
`ValueError` is raised. Care is taken not to raise that error though if
one of the values is `None` since that can mean that a value wasn't
specified. In that case the other value is returned, or `None` is
returned if they are both `None`.
Args:
kwargs_ ([dict]): The dictionary of keyword arguments passed to
`__init__()`.
name ([str]): The name of the parameter.
training_value ([any]): The value for the parameter in the training
archive.
Raises:
ValueError: A `ValueError` is raised if the value of the parameter
in the keyword arguments doesn't match the value from the
training archive.
Returns:
[any]: The value for the parameter, taken from either `kwargs_` or
`training_value`, or both if they are the same.
'''
if kwargs_.get(name) is None:
# No non-default value provided in kwargs_, so use the training
# value.
return training_value
elif training_value is None:
# Have a non-default value in kwargs_ but training_value is None, so
# use the value from kwargs_.
return kwargs_[name]
else:
# In this case both kwargs_ and training_value are non-default.
# If they are the same, then return their common value. If they are
# different raise an error to alert the user.
if isinstance(kwargs_[name], np.ndarray) or isinstance(training_value, np.ndarray):
same = np.array_equal(kwargs_[name], training_value)
else:
same = (kwargs_[name] == training_value)
if same:
return training_value
else:
msg = ("Value passed for {name} ({kwargs_val}) does not match "
"value in training archive ({training_value}).").format(
name=name,
kwargs_val=kwargs_[name],
training_value=training_value,
)
self.log.error(msg)
raise ValueError(msg)
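# Illustrative behaviour of _reconcile_kwarg_and_training_val() (the values
# below are hypothetical):
#
#   self._reconcile_kwarg_and_training_val({'num_params': None}, 'num_params', 3)
#   # -> 3 (kwarg unset, so the training value wins)
#   self._reconcile_kwarg_and_training_val({'num_params': 3}, 'num_params', 3)
#   # -> 3 (the two values agree)
#   self._reconcile_kwarg_and_training_val({'num_params': 4}, 'num_params', 3)
#   # -> raises ValueError (conflicting non-default values)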
def update_archive(self):
'''
Update the archive.
'''
super(MachineLearner, self).update_archive()
new_values_dict = {
'best_cost':self.best_cost,
'best_params':self.best_params,
'best_index':self.best_index,
'worst_cost':self.worst_cost,
'worst_index':self.worst_index,
'cost_range':self.cost_range,
'costs_count':self.costs_count,
'params_count':self.params_count,
}
self.archive_dict.update(new_values_dict)
def wait_for_new_params_event(self):
'''
Waits for a new parameters event and starts a new parameter generation cycle.
Also checks end event and will break if it is triggered.
'''
while not self.end_event.is_set():
if self.new_params_event.wait(timeout=self.learner_wait):
self.new_params_event.clear()
break
else:
continue
else:
self.log.debug('Learner end signal received. Ending')
raise LearnerInterrupt
def get_params_and_costs(self):
'''
Get the parameters and costs from the queue and place in their appropriate all_[type] arrays.
Also updates bad costs, best parameters, and search boundaries given trust region.
'''
new_params = []
new_costs = []
new_uncers = []
new_bads = []
update_bads_flag = False
first_dequeue = True
while True:
if first_dequeue:
try:
# Block for 1s, because there might be a race with the
# new_params_event being set. See comment in
# controllers.MachineLearnerController._optimization_routine().
(param, cost, uncer, bad) = self.costs_in_queue.get(block=True, timeout=1)
first_dequeue = False
except mlu.empty_exception:
msg = 'Learner asked for new parameters but no new costs were provided after 1s.'
self.log.error(msg)
raise ValueError(msg)
else:
try:
(param, cost, uncer, bad) = self.costs_in_queue.get_nowait()
except mlu.empty_exception:
break
self.costs_count +=1
if bad:
new_bads.append(self.costs_count-1)
if self.bad_defaults_set:
cost = self.default_bad_cost
uncer = self.default_bad_uncertainty
else:
cost = self.worst_cost
uncer = self.cost_range*self.bad_uncer_frac
message = (param, cost, uncer, bad)
param, cost, uncer, bad = self._parse_cost_message(message)
uncer = max(uncer, self.minimum_uncertainty)
cost_change_flag = False
if cost > self.worst_cost:
self.worst_cost = cost
self.worst_index = self.costs_count-1
cost_change_flag = True
if cost < self.best_cost:
self.best_cost = cost
self.best_params = param
self.best_index = self.costs_count-1
cost_change_flag = True
if cost_change_flag:
self.cost_range = self.worst_cost - self.best_cost
if not self.bad_defaults_set:
update_bads_flag = True
new_params.append(param)
new_costs.append(cost)
new_uncers.append(uncer)
if self.all_params.size==0:
self.all_params = np.array(new_params, dtype=float)
self.all_costs = np.array(new_costs, dtype=float)
self.all_uncers = np.array(new_uncers, dtype=float)
else:
self.all_params = np.concatenate((self.all_params, np.array(new_params, dtype=float)))
self.all_costs = np.concatenate((self.all_costs, np.array(new_costs, dtype=float)))
self.all_uncers = np.concatenate((self.all_uncers, np.array(new_uncers, dtype=float)))
self.bad_run_indexs.extend(new_bads)
if self.all_params.shape != (self.costs_count,self.num_params):
self.log.error('Saved params are the wrong size. THIS SHOULD NOT HAPPEN:' + repr(self.all_params))
if self.all_costs.shape != (self.costs_count,):
self.log.error('Saved costs are the wrong size. THIS SHOULD NOT HAPPEN:' + repr(self.all_costs))
if self.all_uncers.shape != (self.costs_count,):
self.log.error('Saved uncertainties are the wrong size. THIS SHOULD NOT HAPPEN:' + repr(self.all_uncers))
if update_bads_flag:
self.update_bads()
self.update_search_region()
def update_bads(self):
'''
Best and/or worst costs have changed, update the values associated with bad runs accordingly.
'''
for index in self.bad_run_indexs:
self.all_costs[index] = self.worst_cost
self.all_uncers[index] = self.cost_range*self.bad_uncer_frac
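# Numerical illustration of the bad-run substitution above (hypothetical
# values): with best_cost = 1.0 and worst_cost = 5.0, cost_range = 4.0, so a
# bad run with no defaults set is stored as cost 5.0 with uncertainty
# cost_range * bad_uncer_frac = 4.0 * 0.1 = 0.4.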
def update_search_region(self):
'''
If the learner has a trust region, updates the search boundaries based on the defined trust region.
'''
if self.has_trust_region:
self.search_min = np.maximum(self.best_params - self.trust_region, self.min_boundary)
self.search_max = np.minimum(self.best_params + self.trust_region, self.max_boundary)
self.search_diff = self.search_max - self.search_min
self.search_region = np.transpose([self.search_min, self.search_max])
def update_search_params(self):
'''
Update the list of parameters to use for the next search.
'''
self.search_params = []
self.search_params.append(self.best_params)
for _ in range(self.parameter_searches):
self.search_params.append(self.search_min + nr.uniform(size=self.num_params) * self.search_diff)
def _find_predicted_minimum(
self,
scaled_figure_of_merit_function,
scaled_search_region,
params_scaler,
scaled_jacobian_function=None,
):
'''
Find the predicted minimum of `scaled_figure_of_merit_function()`.
The search for the minimum is constrained to be within
`scaled_search_region`.
The `scaled_figure_of_merit_function()` should take inputs in scaled
units and generate outputs in scaled units. This is necessary because
`scipy.optimize.minimize()` (which is used internally here) can struggle
if the numbers are too small or too large. Using scaled parameters and
figures of merit brings the numbers closer to ~1, which can improve the
behavior of `scipy.optimize.minimize()`.
Args:
scaled_figure_of_merit_function (function): This should be a
function which accepts an array of scaled parameter values and
returns a predicted figure of merit. Importantly, both the input
parameter values and the returned value should be in scaled
units.
scaled_search_region (array): The scaled parameter-space bounds for
the search. The returned minimum position will be constrained to
be within this region. The `scaled_search_region` should be a 2D
array of shape `(self.num_params, 2)` where the first column
specifies lower bounds and the second column specifies upper
bounds for each parameter (in scaled units).
params_scaler (mloop.utilities.ParameterScaler): A `ParameterScaler`
instance for converting parameters to scaled units.
scaled_jacobian_function (function, optional): An optional function
giving the Jacobian of `scaled_figure_of_merit_function()` which
will be used by `scipy.optimize.minimize()` if provided. As with
`scaled_figure_of_merit_function()`, the
`scaled_jacobian_function()` should accept and return values in
scaled units. If `None` then no Jacobian will be provided to
`scipy.optimize.minimize()`. Defaults to `None`.
Returns:
best_scaled_params (array): The scaled parameter values which
minimize `scaled_figure_of_merit_function()` within
`scaled_search_region`. They are provided as a 1D array of
values in scaled units.
'''
# Generate the list of starting points for the search.
self.update_search_params()
# Search for parameters which minimize the provided
# scaled_figure_of_merit_function, starting at a few different points in
# parameter-space. The search for the next parameters will be performed
# in scaled units because so.minimize() can struggle with very large or
# very small values.
best_scaled_cost = float('inf')
best_scaled_params = None
for start_params in self.search_params:
scaled_start_parameters = params_scaler.transform(
[start_params],
)
# Extract 1D array from 2D array.
scaled_start_parameters = scaled_start_parameters[0]
result = so.minimize(
scaled_figure_of_merit_function,
scaled_start_parameters,
jac=scaled_jacobian_function,
bounds=scaled_search_region,
tol=self.search_precision,
)
# Check if these parameters give better predicted results than any
# others found so far in this search.
current_best_scaled_cost = result.fun
curr_best_scaled_params = result.x
if current_best_scaled_cost < best_scaled_cost:
best_scaled_cost = current_best_scaled_cost
best_scaled_params = curr_best_scaled_params
return best_scaled_params
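# A self-contained sketch of the constrained-minimization pattern used by
# _find_predicted_minimum() above. The quadratic figure of merit is a
# stand-in for the learner's predicted cost; all names and values here are
# illustrative assumptions, not the module's API.
#
#   import numpy as np
#   import scipy.optimize as so
#
#   def scaled_figure_of_merit(x):
#       return np.sum((x - 0.3)**2)  # minimum at x = [0.3, 0.3]
#
#   scaled_search_region = np.array([[0.0, 1.0], [0.0, 1.0]])
#   result = so.minimize(scaled_figure_of_merit,
#                        x0=np.array([0.9, 0.1]),
#                        bounds=scaled_search_region,
#                        tol=1e-6)
#   # result.x ~ [0.3, 0.3]; _find_predicted_minimum() repeats this from
#   # several random starting points and keeps the best result.fun.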
class GaussianProcessLearner(MachineLearner, mp.Process):
'''
Gaussian process learner.
Generates new parameters based on a gaussian process fitted to all previous
data.
Args:
params_out_queue (queue): Queue for parameters sent to controller.
costs_in_queue (queue): Queue for costs for gaussian process. This must
be tuple.
end_event (event): Event to trigger end of learner.
Keyword Args:
length_scale (Optional [array]): The initial guess for length scale(s)
of the gaussian process. The array can either of size one or the
number of parameters or `None`. If it is size one, it is assumed
that all of the correlation lengths are the same. If it is an array
with length equal to the number of the parameters then all the
parameters have their own independent length scale. If it is set to
`None` and a learner archive from a Gaussian process optimization
is provided for `gp_training_filename`, then it will be set to the
value recorded for `length_scale` in that learner archive. If set to
`None` but `gp_training_filename` does not specify a learner archive
from a Gaussian process optimization, then it is assumed that all of
the length scales should be independent and they are all given an
initial value of equal to one tenth of their allowed range. Default
`None`.
length_scale_bounds (Optional [array]): The limits on the fitted length
scale values, specified as a single pair of numbers e.g.
`[min, max]`, or a list of pairs of numbers, e.g.
`[[min_0, max_0], ..., [min_N, max_N]]`. This only has an effect if
`update_hyperparameters` is set to `True`. If one pair is provided,
the same limits will be used for all length scales. Alternatively
one pair of `[min, max]` can be provided for each length scale. For
example, possible valid values include `[1e-5, 1e5]` and
`[[1e-2, 1e2], [5, 5], [1.6e-4, 1e3]]` for optimizations with three
parameters. If set to `None`, then the length scale will be bounded
to be between `0.001` and `10` times the allowed range for each
parameter.
update_hyperparameters (Optional [bool]): Whether the length scales and
noise estimate should be updated when new data is provided. Default
`True`.
cost_has_noise (Optional [bool]): If `True` the learner assumes there is
common additive white noise that corrupts the costs provided. This
noise is assumed to be on top of the uncertainty in the costs (if it
is provided). If `False`, it is assumed that there is no noise in
the cost (or if uncertainties are provided no extra noise beyond the
uncertainty). Default `True`.
noise_level (Optional [float]): The initial guess for the noise level
(variance, not standard deviation) in the costs. This is only used
if `cost_has_noise` is `True`. If it is set to `None` and a learner
archive from a Gaussian process optimization is provided for
`gp_training_filename`, then it will be set to the value recorded
for `noise_level` in that learner archive. If set to `None` but
`gp_training_filename` does not specify a learner archive from a
Gaussian process optimization, then it will automatically be set to
the variance of the training data costs.
noise_level_bounds (Optional [array]): The limits on the fitted
`noise_level` values, specified as a single pair of numbers
`[min, max]`. This only has an effect if `update_hyperparameters`
and `cost_has_noise` are both set to `True`. If set to `None`, the
value `[1e-5 * var, 1e5 * var]` will be used where `var` is the
variance of the training data costs. Default `None`.
gp_training_filename (Optional [str]): The name of a learner archive
from a previous optimization from which to extract past results for
use in the current optimization. If `None`, no past results will be
used. Default `None`.
gp_training_file_type (Optional [str]): File type of the training
archive. Can be `'txt'`, `'pkl'`, `'mat'`, or `None`. If set to
`None`, then the file type will be determined automatically. This
argument has no effect if `gp_training_filename` is set to `None`.
Default `None`.
trust_region (Optional [float or array]): The trust region defines the
maximum distance the learner will travel from the current best set
of parameters. If `None`, the learner will search everywhere. If a
float, this number must be between 0 and 1 and defines maximum
distance the learner will venture as a percentage of the boundaries.
If it is an array, it must have the same size as the number of
parameters and the numbers define the maximum absolute distance that
can be moved along each direction.
default_bad_cost (Optional [float]): If a run is reported as bad and
`default_bad_cost` is provided, the cost for the bad run is set to
this default value. If `default_bad_cost` is `None`, then the cost for
bad runs is set to the worst cost received. Default `None`.
default_bad_uncertainty (Optional [float]): If a run is reported as bad
and `default_bad_uncertainty` is provided, the uncertainty for the
bad run is set to this default value. If `default_bad_uncertainty`
is `None`, then the uncertainty is set to a tenth of the best to
worst cost range. Default `None`.
minimum_uncertainty (Optional [float]): The minimum uncertainty
associated with provided costs. Must be above zero to avoid fitting
errors. Default `1e-8`.
predict_global_minima_at_end (Optional [bool]): If `True` attempts to
find the global minima when the learner is ended. Does not if
`False`. Default `True`.
Attributes:
all_params (array): Array containing all parameters sent to learner.
all_costs (array): Array containing all costs sent to learner.
all_uncers (array): Array containing all uncertainties sent to learner.
scaled_costs (array): Array containing all the costs scaled to have zero
mean and a standard deviation of 1. Needed for training the gaussian
process.
bad_run_indexs (list): list of indexes to all runs that were marked as
bad.
best_cost (float): Minimum received cost, updated during execution.
best_params (array): Parameters of best run. (reference to element in
params array).
best_index (int): index of the best cost and params.
worst_cost (float): Maximum received cost, updated during execution.
worst_index (int): index to run with worst cost.
cost_range (float): Difference between `worst_cost` and `best_cost`.
generation_num (int): Number of sets of parameters to generate each
generation. Set to `4`.
length_scale_history (list): List of length scales found after each fit.
noise_level_history (list): List of noise levels found after each fit.
fit_count (int): Counter for the number of times the gaussian process
has been fit.
cost_count (int): Counter for the number of costs, parameters and
uncertainties added to learner.
params_count (int): Counter for the number of parameters asked to be
evaluated by the learner.
gaussian_process (GaussianProcessRegressor): Gaussian process that is
fitted to data and used to make predictions
cost_scaler (StandardScaler): Scaler used to normalize the provided
costs.
params_scaler (StandardScaler): Scaler used to normalize the provided
parameters.
has_trust_region (bool): Whether the learner has a trust region.
'''
_ARCHIVE_TYPE = 'gaussian_process_learner'
_DEFAULT_SCALED_LENGTH_SCALE = 1e-1
_DEFAULT_SCALED_LENGTH_SCALE_BOUNDS = np.array([1e-3, 1e1])
def __init__(self,
length_scale = None,
length_scale_bounds=None,
update_hyperparameters = True,
cost_has_noise=True,
noise_level=None,
noise_level_bounds=None,
gp_training_filename =None,
gp_training_file_type = None,
**kwargs):
if gp_training_filename is not None:
super(GaussianProcessLearner,self).__init__(
training_filename=gp_training_filename,
training_file_type=gp_training_file_type,
**kwargs
)
# Maintain backwards compatibility with archives generated by
# previous versions of M-LOOP.
# M-LOOP versions <= 3.1.1 didn't scale noise level and didn't
# record the M-LOOP version. Mark that noise levels should be
# unscaled later, which is necessary for plotting for archives from
# older versions of M-LOOP. Since only Gaussian learner archives
# save a noise level, this should only be done if the training
# archive was from a Gaussian learner optimization.
self._scale_deprecated_noise_levels = False
if self._learner_type_matches_training_archive:
if 'mloop_version' not in self.training_dict:
self._scale_deprecated_noise_levels = True
else:
super(GaussianProcessLearner,self).__init__(**kwargs)
# Maintain backwards compatibility with archives generated by
# previous versions of M-LOOP.
self._scale_deprecated_noise_levels = False
# Parameters that should only be loaded if a training archive was
# provided and it has the same learner type.
if self._learner_type_matches_training_archive:
training_dict = self.training_dict
# Storage variables, archived
self.length_scale_history = list(
training_dict['length_scale_history']
)
self.noise_level_history = mlu.safe_cast_to_list(
training_dict['noise_level_history']
)
# Counters
self.fit_count = int(training_dict['fit_count'])
# Fit parameters that can be overridden by user keyword options.
if length_scale is None:
length_scale = mlu.safe_cast_to_array(
training_dict['length_scale'],
)
if noise_level is None:
noise_level = float(training_dict['noise_level'])
# The options below are not present in archives from M-LOOP <= 3.1.1
# so they need an extra check to see if there are values available
# for them.
if length_scale_bounds is None:
if 'length_scale_bounds' in training_dict:
length_scale_bounds = mlu.safe_cast_to_array(
training_dict['length_scale_bounds'],
)
if noise_level_bounds is None:
if 'noise_level_bounds' in training_dict:
noise_level_bounds = mlu.safe_cast_to_array(
training_dict['noise_level_bounds'],
)
else:
# Storage variables, archived
self.length_scale_history = []
self.noise_level_history = []
# Counters
self.fit_count = 0
#Storage variables and counters
self.scaled_uncers = None
self.scaled_noise_level = None
self.scaled_noise_level_bounds = None
self.cost_bias = None
self.uncer_bias = None
#Internal variable for bias function
self.bias_func_cycle = 4
self.bias_func_cost_factor = [1.0, 1.0, 1.0, 1.0]
self.bias_func_uncer_factor = [0.0, 1.0, 2.0, 3.0]
self.generation_num = self.bias_func_cycle
if self.generation_num < 3:
msg = 'Number in generation must be larger than 2.'
self.log.error(msg)
raise ValueError(msg)
#Constants, limits and tolerances
self.hyperparameter_searches = max(10,self.num_params)
# Scalers for the costs and parameter values.
self.cost_scaler = skp.StandardScaler()
self.params_scaler = mlu.ParameterScaler(
self.min_boundary,
self.max_boundary,
)
# Fit the scaler to the min/max boundaries.
self.params_scaler.partial_fit()
# Optional user set variables
self.cost_has_noise = bool(cost_has_noise)
self.update_hyperparameters = bool(update_hyperparameters)
# Length scale.
if length_scale is None:
self.scaled_length_scale = self._DEFAULT_SCALED_LENGTH_SCALE
self.length_scale = self._transform_length_scales(
self.scaled_length_scale,
inverse=True,
)
else:
self.length_scale = np.array(length_scale, dtype=float)
self.scaled_length_scale = self._transform_length_scales(
self.length_scale,
)
# Noise level.
if noise_level is None:
# Temporarily change to NaN to mark that the default value
# should be calculated once training data is available. Using
# NaN instead of None is necessary in case the archive is saved
# in .mat format since it can handle NaN but not None.
self.noise_level = float('nan')
else:
self.noise_level = float(noise_level)
# Length scale bounds.
if length_scale_bounds is None:
self.scaled_length_scale_bounds = self._DEFAULT_SCALED_LENGTH_SCALE_BOUNDS
self.length_scale_bounds = self._transform_length_scale_bounds(
self.scaled_length_scale_bounds,
inverse=True,
)
else:
self.length_scale_bounds = mlu.safe_cast_to_array(
length_scale_bounds,
)
self.scaled_length_scale_bounds = self._transform_length_scale_bounds(
self.length_scale_bounds,
)
# Noise level bounds.
if noise_level_bounds is None:
self.noise_level_bounds = float('nan')
else:
self.noise_level_bounds = mlu.safe_cast_to_array(noise_level_bounds)
#Checks of variables
if self.length_scale.size == 1:
self.length_scale = float(self.length_scale)
elif not self.check_num_params(self.length_scale):
msg = 'Correlation lengths not the right size and shape, must be one or the number of parameters:' + repr(self.length_scale)
self.log.error(msg)
raise ValueError(msg)
if not np.all(self.length_scale >0):
msg = 'Correlation lengths must all be positive numbers:' + repr(self.length_scale)
self.log.error(msg)
raise ValueError(msg)
self._check_length_scale_bounds()
if self.noise_level < 0:
msg = 'noise_level must be greater or equal to zero:' +repr(self.noise_level)
self.log.error(msg)
raise ValueError(msg)
self._check_noise_level_bounds()
if self.default_bad_uncertainty is not None:
if self.default_bad_uncertainty < 0:
msg = 'Default bad uncertainty must be positive.'
self.log.error(msg)
raise ValueError(msg)
self.gaussian_process = None
# Update archive.
new_values_dict = {
'archive_type': self._ARCHIVE_TYPE,
'cost_has_noise': self.cost_has_noise,
'length_scale_history': self.length_scale_history,
'length_scale_bounds': self.length_scale_bounds,
'noise_level_history': self.noise_level_history,
'noise_level_bounds': self.noise_level_bounds,
'bias_func_cycle': self.bias_func_cycle,
'bias_func_cost_factor': self.bias_func_cost_factor,
'bias_func_uncer_factor': self.bias_func_uncer_factor,
'generation_num': self.generation_num,
'update_hyperparameters': self.update_hyperparameters,
'hyperparameter_searches': self.hyperparameter_searches,
}
self.archive_dict.update(new_values_dict)
#Remove logger so gaussian process can be safely pickled for multiprocessing on Windows
self.log = None
def _transform_length_scales(self, length_scales, inverse=False):
'''
Transform length scales to or from scaled units.
This method uses `self.params_scaler` to transform length scales to/from
scaled units. To transform from real/unscaled units to scaled units,
call this method with `inverse` set to `False`. To perform the inverse
transformation, namely to transform length scales from scaled units to
real/unscaled units, call this method with `inverse` set to `True`.
Notably length scales should be scaled, but not offset, when they are
transformed. For this reason, they should not simply be passed through
`self.params_scaler.transform()` and instead should be passed through
this method.
Although `length_scales` can be a single float, this method always
returns a 1D array because the scaling factors aren't generally the same
for all of the parameters. This implies that transforming a float then
performing the inverse transformation will yield a 1D array of identical
entries rather than a single float.
Args:
length_scales (float or array): Length scale(s) for the Gaussian
process which should be transformed to or from scaled units. Can
be either a single float or a 1D array of length
`self.num_params`.
inverse (bool): This argument controls whether the forward or
inverse transformation is applied. If `False`, then the forward
transformation is applied, which takes `length_scales` in
real/unscaled units and transforms them to scaled units. If
`True` then this method assumes that `length_scales` are in
scaled units and transforms them into real/unscaled units.
Default `False`.
Returns:
transformed_length_scales (array): The transformed length scales.
These will be in scaled units if `inverse` is `False` or in
real/unscaled units if `inverse` is `True`. Note that
`transformed_length_scales` will be a 1D array even if
`length_scales` was a single float.
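        Example:
            A round-trip sketch with illustrative scale factors (not real
            values of `self.params_scaler.scale_`): if the scale factors are
            `[2.0, 4.0]`, the forward transformation of the float `1.0` gives
            `array([2., 4.])`, and the inverse transformation of that result
            gives `array([1., 1.])`, a 1D array rather than the original
            float.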
'''
# scale_factors is a 1D array with length equal to the number of
# parameters.
scale_factors = self.params_scaler.scale_
if inverse:
transformed_length_scales = length_scales / scale_factors
else:
transformed_length_scales = length_scales * scale_factors
return transformed_length_scales
def _transform_length_scale_bounds(
self,
length_scale_bounds,
inverse=False,
):
'''
Transform length scale bounds to or from scaled units.
        This method functions similarly to `self._transform_length_scales()`,
except that it transforms the bounds for the length scales. The same
scalings used for the length scales themselves are applied here to the
lower and upper bounds. To transform from real/unscaled units to scaled
units, call this method with `inverse` set to `False`. To perform the
inverse transformation, namely to transform length scale bounds from
scaled units to real/unscaled units, call this method with `inverse` set
to `True`.
The output array will have a separate scaled min/max value pair for each
parameter length scale. In other words, the output will be an array with
two columns (one for min values and one for max values) and one row for
each parameter length scale. This will be the case even if
`length_scale_bounds` consists of a single min/max value pair because
the scalings are generally different for different parameters.
Note that although `length_scale_bounds` can be a 1D array with only two
entries (a single min/max pair shared by all parameters), this method
always returns a 2D array with a separate min/max pair for each
parameter because the scaling factors aren't generally the same for all
of the parameters. This implies that transforming a 1D array then
performing the inverse transformation will yield a 2D array of identical
min/max pairs rather than the original 1D array.
Args:
length_scale_bounds (array): The bounds for the Gaussian process's
length scales which should be transformed to or from scaled
units. This can either be (a) a 1D array with two entries of the
form `[min, max]` or (b) a 2D array with two columns (min and
max values respectively) and one row for each parameter length
scale.
inverse (bool): This argument controls whether the forward or
inverse transformation is applied. If `False`, then the forward
transformation is applied, which takes `length_scale_bounds` in
real/unscaled units and transforms them to scaled units. If
`True` then this method assumes that `length_scale_bounds` are
in scaled units and transforms them into real/unscaled units.
Default `False`.
Raises:
ValueError: A `ValueError` is raised if `length_scale_bounds` does
not have an acceptable shape. The allowed shapes are `(2,)` (a
single min/max pair shared by all parameters) or
`(self.num_params, 2)` (a separate min/max pair for each
parameter).
Returns:
transformed_length_scale_bounds (array): The transformed length
scale bounds. These will be in scaled units if `inverse` is
`False` or in real/unscaled units if `inverse` is `True`. Note
that `transformed_length_scale_bounds` will always be a 2D array
of shape `(self.num_params, 2)` even if `length_scale_bounds`
was a single pair of min/max values.
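        Example:
            A sketch with illustrative scale factors: for two parameters with
            scale factors `[2.0, 4.0]`, the forward transformation of the
            shared pair `np.array([1.0, 10.0])` gives
            `array([[2., 20.], [4., 40.]])`, i.e. one scaled min/max pair per
            parameter.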
'''
if length_scale_bounds.shape == (2,):
# In this case, length_scale_bounds is just one pair of min and max
# values which should be applied to every parameter length scale.
min_, max_ = length_scale_bounds
lower_bounds = np.full(self.num_params, min_)
upper_bounds = np.full(self.num_params, max_)
elif length_scale_bounds.shape == (self.num_params, 2):
# In this case there is a separate min/max bound for each parameter
# length scale.
lower_bounds = length_scale_bounds[:, 0]
upper_bounds = length_scale_bounds[:, 1]
else:
# In this case, length_scale_bounds has an invalid shape.
msg = (
f"length_scale_bounds should either be a 1D array with two "
f"values or a 2D array with two columns and one row for each "
f"parameter ({self.num_params} here) but was "
f"{length_scale_bounds} (shape {length_scale_bounds.shape})."
)
self.log.error(msg)
raise ValueError(msg)
# Use self._transform_length_scales() to transform the limits.
transformed_lower_bounds = self._transform_length_scales(
lower_bounds,
inverse=inverse,
)
transformed_upper_bounds = self._transform_length_scales(
upper_bounds,
inverse=inverse,
)
transformed_length_scale_bounds = np.transpose(
[transformed_lower_bounds, transformed_upper_bounds],
)
return transformed_length_scale_bounds
def _check_length_scale_bounds(self):
'''
Ensure self.length_scale_bounds has a valid value, otherwise raise a
ValueError.
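        Example:
            A sketch of the accepted shapes, assuming an illustrative
            `num_params` of 3: `np.array([1e-2, 1e2])` (a single pair shared
            by all parameters) and `np.array([[1e-2, 1e2]] * 3)` (one pair
            per parameter) both pass; any other shape raises a ValueError.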
'''
bounds = self.length_scale_bounds
# First ensure that all of the limits are positive numbers.
if not np.all(bounds > 0):
msg = 'Correlation length bounds must all be positive numbers: ' + repr(self.length_scale_bounds)
self.log.error(msg)
raise ValueError(msg)
        dims_error_message = ('Length scale bounds must be a single pair '
                              '(min, max) or a list of pairs [(min_0, max_0), '
                              '..., (min_N, max_N)] with one pair per '
                              'parameter: ' + repr(bounds))
range_error_message = ('The length scale lower bound must be less than '
'or equal to the upper bound: ' + repr(bounds))
if bounds.ndim == 1:
# In this case, length_scale_bounds should be a single pair of
# numbers, e.g. (1, 2).
if bounds.shape[0] != 2:
self.log.error(dims_error_message)
raise ValueError(dims_error_message)
# Ensure min <= max.
if bounds[1] < bounds[0]:
self.log.error(range_error_message)
raise ValueError(range_error_message)
elif bounds.ndim == 2:
# In this case, length_scale_bounds should be a list of pairs of
# numbers, with exactly one pair per parameter.
if bounds.shape[0] != self.num_params:
self.log.error(dims_error_message)
raise ValueError(dims_error_message)
elif bounds.shape[1] != 2:
self.log.error(dims_error_message)
raise ValueError(dims_error_message)
# Ensure min <= max for all pairs.
if np.any(bounds[:, 1] < bounds[:, 0]):
self.log.error(range_error_message)
raise ValueError(range_error_message)
else:
            # Any number of dimensions other than 1 or 2 is definitely wrong.
self.log.error(dims_error_message)
raise ValueError(dims_error_message)
def _check_noise_level_bounds(self):
'''
        Ensure self.noise_level_bounds has a valid value, otherwise raise a ValueError.
'''
bounds = self.noise_level_bounds
        # If self.noise_level_bounds is set to NaN, then its actual value will
# be automatically set later once training data is available. In that
# case there's no need to check anything.
if np.any(np.isnan(bounds)):
return
# Ensure that all of the limits are positive numbers.
if not np.all(bounds > 0):
            msg = ('Noise level bounds must all be positive numbers: ' +
                   repr(bounds))
self.log.error(msg)
raise ValueError(msg)
# Ensure that the dimensions are correct.
if bounds.shape != (2,):
msg = ('Noise level bounds should have exactly two elements: ' +
repr(bounds))
self.log.error(msg)
raise ValueError(msg)
# Ensure min <= max.
if bounds[1] < bounds[0]:
            msg = ('Noise level lower bound must be less than or equal to '
                   'the upper bound: ' + repr(bounds))
self.log.error(msg)
raise ValueError(msg)
def create_gaussian_process(self):
'''
Create a Gaussian process.
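        The kernel is, schematically, an RBF kernel over the scaled length
        scales, with a WhiteKernel noise term added when `self.cost_has_noise`
        is `True`, and with `alpha` set to the squared scaled uncertainties.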
'''
gp_kernel = skk.RBF(
length_scale=self.scaled_length_scale,
length_scale_bounds=self.scaled_length_scale_bounds,
)
if self.cost_has_noise:
white_kernel = skk.WhiteKernel(
noise_level=self.scaled_noise_level,
noise_level_bounds=self.scaled_noise_level_bounds,
)
gp_kernel = gp_kernel + white_kernel
alpha = self.scaled_uncers**2
        if self.update_hyperparameters:
            self.gaussian_process = skg.GaussianProcessRegressor(
                alpha=alpha,
                kernel=gp_kernel,
                n_restarts_optimizer=self.hyperparameter_searches,
            )
        else:
            self.gaussian_process = skg.GaussianProcessRegressor(
                alpha=alpha,
                kernel=gp_kernel,
                optimizer=None,
            )
def update_archive(self):
'''
Update the archive.
'''
super(GaussianProcessLearner, self).update_archive()
new_values_dict = {
'fit_count': self.fit_count,
'length_scale': self.length_scale,
'noise_level': self.noise_level,
}
self.archive_dict.update(new_values_dict)
def fit_gaussian_process(self):
'''
        Fit the Gaussian process to the current data.
'''
self.log.debug('Fitting Gaussian process.')
        if self.all_params.size == 0 or self.all_costs.size == 0 or self.all_uncers.size == 0:
msg = 'Asked to fit GP but no data is in all_costs, all_params or all_uncers.'
self.log.error(msg)
raise ValueError(msg)
        self.scaled_costs = self.cost_scaler.fit_transform(self.all_costs[:, np.newaxis])[:, 0]
cost_scaling_factor = float(self.cost_scaler.scale_)
self.scaled_uncers = self.all_uncers / cost_scaling_factor
self.scaled_params = self.params_scaler.transform(self.all_params)
if self.cost_has_noise:
            # Ensure compatibility with archives from M-LOOP versions <= 3.1.1.
if self._scale_deprecated_noise_levels:
self.noise_level = self.noise_level * cost_scaling_factor**2
self.noise_level_history = [level * cost_scaling_factor**2 for level in self.noise_level_history]
# Mark that scaling is done to avoid doing it multiple times.
self._scale_deprecated_noise_levels = False
if np.isnan(self.noise_level):
                # Set noise_level to its default value, namely the variance of
                # the training data, which equals the square of the cost
                # scaling factor. This only happens on the first iteration
                # since self.noise_level is overwritten afterwards.
self.noise_level = cost_scaling_factor**2
if np.any(np.isnan(self.noise_level_bounds)):
self.noise_level_bounds = np.array([1e-5, 1e5]) * cost_scaling_factor**2
            # The cost variance's scaling factor is the square of the cost's scaling factor.
self.scaled_noise_level = self.noise_level / cost_scaling_factor**2
self.scaled_noise_level_bounds = self.noise_level_bounds / cost_scaling_factor**2
self.create_gaussian_process()
        self.gaussian_process.fit(self.scaled_params, self.scaled_costs)
if self.update_hyperparameters:
self.fit_count += 1
last_hyperparameters = self.gaussian_process.kernel_.get_params()
if self.cost_has_noise:
self.scaled_length_scale = last_hyperparameters['k1__length_scale']
self.length_scale = self._transform_length_scales(
self.scaled_length_scale,
inverse=True,
)
if isinstance(self.length_scale, float):
self.length_scale = np.array([self.length_scale])
self.length_scale_history.append(self.length_scale)
self.scaled_noise_level = last_hyperparameters['k2__noise_level']
self.noise_level = self.scaled_noise_level * cost_scaling_factor**2
self.noise_level_history.append(self.noise_level)
else:
self.scaled_length_scale = last_hyperparameters['length_scale']
self.length_scale = self._transform_length_scales(
self.scaled_length_scale,
inverse=True,
)
self.length_scale_history.append(self.length_scale)
def update_bias_function(self):
'''
Set the constants for the cost bias function.
'''
        self.cost_bias = self.bias_func_cost_factor[self.params_count % self.bias_func_cycle]
        self.uncer_bias = self.bias_func_uncer_factor[self.params_count % self.bias_func_cycle]
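        # A worked sketch of the cycling logic with illustrative values: if
        # bias_func_cycle is 4 and params_count is 6, then index 6 % 4 = 2
        # selects the third entry of each factor list.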
def predict_biased_cost(self, params, perform_scaling=True):
'''
Predict the biased cost at the given parameters.
The biased cost is a weighted sum of the predicted cost and the
        uncertainty of the predicted cost. In particular, the bias function is:
`biased_cost = cost_bias * pred_cost - uncer_bias * pred_uncer`
Args:
params (array): A 1D array containing the values for each parameter.
These should be in real/unscaled units if `perform_scaling` is
`True` or they should be in scaled units if `perform_scaling` is
`False`.
perform_scaling (bool, optional): Whether or not the parameters and
biased costs should be scaled. If `True` then this method takes
in parameter values in real/unscaled units then returns a biased
predicted cost in real/unscaled units. If `False`, then this
method takes parameter values in scaled units and returns a
biased predicted cost in scaled units. Note that this method
cannot determine on its own if the values in `params` are in
real/unscaled units or scaled units; it is up to the caller to
pass the correct values. Defaults to `True`.
Returns:
pred_bias_cost (float): Biased cost predicted for the given
parameters. This will be in real/unscaled units if
`perform_scaling` is `True` or it will be in scaled units if
`perform_scaling` is `False`.
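        Example:
            A worked sketch of the bias formula with illustrative values: if
            `cost_bias` is 1.0, `uncer_bias` is 2.0, the predicted cost is
            0.5, and the predicted uncertainty is 0.1, then the biased cost
            is `1.0 * 0.5 - 2.0 * 0.1 = 0.3`.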
'''
# Determine the predicted cost and uncertainty.
cost, uncertainty = self.predict_cost(
params,
perform_scaling=perform_scaling,
return_uncertainty=True,
)
# Calculate the biased cost.
biased_cost = self.cost_bias * cost - self.uncer_bias * uncertainty
return biased_cost
def find_next_parameters(self):
'''
Get the next parameters to test.
This method searches for the parameters expected to give the minimum
biased cost, as predicted by the Gaussian process. The biased cost is
not just the predicted cost, but a weighted sum of the predicted cost
and the uncertainty in the predicted cost. See
`self.predict_biased_cost()` for more information.
This method additionally increments `self.params_count` appropriately.
        Returns:
next_params (array): The next parameter values to try, stored in a
1D array.
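        Example:
            A usage sketch, where `learner` is an instance that has already
            been fitted:
                next_params = learner.find_next_parameters()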
'''
# Increment the counter and update the bias function.
self.params_count += 1
self.update_bias_function()
# Define the function to minimize when picking the next parameters.
def scaled_biased_cost_function(scaled_parameters):
scaled_biased_cost = self.predict_biased_cost(
scaled_parameters,
perform_scaling=False,
)
return scaled_biased_cost
# Set bounds on the parameter-space for the search.
scaled_search_region = self.params_scaler.transform(self.search_region.T).T
# Find the scaled parameters which minimize the biased cost function.
next_scaled_params = self._find_predicted_minimum(
scaled_figure_of_merit_function=scaled_biased_cost_function,
scaled_search_region=scaled_search_region,
params_scaler=self.params_scaler,
)
# Convert the scaled parameters to real/unscaled units.
next_params = self.params_scaler.inverse_transform([next_scaled_params])[0]
return next_params
def run(self):
        '''
        Start running the Gaussian process learner.
        When the new-parameters event is triggered, this method reads the
        provided cost information and updates the Gaussian process with it.
        It then searches the Gaussian process for new optimal parameters to
        test based on the biased cost. The parameters to test next are put on
        the output parameters queue.
        '''
        # Logging to the main log file from a process (as opposed to a thread)
        # in CPython is currently buggy on Windows and/or Python 2.7. The
        # current solution is to log to the console only for warnings and
        # above from a process.
self.log = mp.log_to_stderr(logging.WARNING)
try:
while not self.end_event.is_set():
#self.log.debug('Learner waiting for new params event')
self.save_archive()
self.wait_for_new_params_event()
#self.log.debug('Gaussian process learner reading costs')
self.get_params_and_costs()
self.fit_gaussian_process()
for _ in range(self.generation_num):
                    self.log.debug('Gaussian process learner generating parameter: ' + str(self.params_count + 1))
next_params = self.find_next_parameters()
self.params_out_queue.put(next_params)
if self.end_event.is_set():
raise LearnerInterrupt()
except LearnerInterrupt:
pass
end_dict = {}
if self.predict_global_minima_at_end:
if not self.costs_in_queue.empty():
# There are new parameters, get them.
self.get_params_and_costs()
self.fit_gaussian_process()
self.find_global_minima()
            end_dict.update({
                'predicted_best_parameters': self.predicted_best_parameters,
                'predicted_best_cost': self.predicted_best_cost,
                'predicted_best_uncertainty': self.predicted_best_uncertainty,
            })
self.params_out_queue.put(end_dict)
self._shut_down()
self.log.debug('Ended Gaussian Process Learner')
def predict_cost(
self,
params,
perform_scaling=True,
return_uncertainty=False,
):
'''
Predict the cost for `params` using `self.gaussian_process`.
This method also optionally returns the uncertainty of the predicted
cost.
By default (with `perform_scaling=True`) this method will use
`self.params_scaler` to scale the input values and then use
`self.cost_scaler` to scale the cost back to real/unscaled units. If
`perform_scaling` is `False`, then this scaling will NOT be done. In
that case, `params` should consist of already-scaled parameter values
and the returned cost (and optional uncertainty) will be in scaled
units.
Args:
params (array): A 1D array containing the values for each parameter.
These should be in real/unscaled units if `perform_scaling` is
`True` or they should be in scaled units if `perform_scaling` is
`False`.
perform_scaling (bool, optional): Whether or not the parameters and
costs should be scaled. If `True` then this method takes in
parameter values in real/unscaled units then returns a predicted
cost (and optionally the predicted cost uncertainty) in
real/unscaled units. If `False`, then this method takes
parameter values in scaled units and returns a cost (and
optionally the predicted cost uncertainty) in scaled units. Note
that this method cannot determine on its own if the values in
`params` are in real/unscaled units or scaled units; it is up to
the caller to pass the correct values. Defaults to `True`.
return_uncertainty (bool, optional): This optional argument controls
whether or not the predicted cost uncertainty is returned with
the predicted cost. The predicted cost uncertainty will be in
real/unscaled units if `perform_scaling` is `True` and will be
in scaled units if `perform_scaling` is `False`. Defaults to
`False`.
Returns:
cost (float): Predicted cost at `params`. The cost will be in
real/unscaled units if `perform_scaling` is `True` and will be
in scaled units if `perform_scaling` is `False`.
uncertainty (float, optional): The uncertainty of the predicted
cost. This will be in the same units (either real/unscaled or
scaled) as the returned `cost`. The `cost_uncertainty` will only
be returned if `return_uncertainty` is `True`.
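        Example:
            A minimal usage sketch, where `learner` is an instance that has
            already been fitted via `fit_gaussian_process()` and the
            parameter values are illustrative:
                cost, uncertainty = learner.predict_cost(
                    np.array([0.1, 0.2]),
                    return_uncertainty=True,
                )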
'''
# Reshape to 2D array as the methods below expect this format.
params = params[np.newaxis,:]
# Scale the input parameters if set to do so.
if perform_scaling:
scaled_params = self.params_scaler.transform(params)
else:
scaled_params = params
# Generate the prediction using self.gaussian_process.
predicted_results = self.gaussian_process.predict(
scaled_params,
return_std=return_uncertainty,
)
if return_uncertainty:
scaled_cost, scaled_uncertainty = predicted_results
else:
scaled_cost = predicted_results
# Un-scale the cost if set to do so.
if perform_scaling:
cost = self.cost_scaler.inverse_transform(
scaled_cost.reshape(1, -1),
)
cost = cost[0, 0] # Extract from 2D array.
else:
cost = scaled_cost[0] # Extract from 1D array.
# Un-scale the uncertainty if set to do so.
if return_uncertainty:
if perform_scaling:
cost_scaling_factor = self.cost_scaler.scale_
uncertainty = scaled_uncertainty * cost_scaling_factor
else:
uncertainty = scaled_uncertainty
uncertainty = uncertainty[0] # Extract from 1D array.
# Return the requested results.
if return_uncertainty:
return cost, uncertainty
else:
return cost
def find_global_minima(self):
'''
Search for the global minima predicted by the Gaussian process.
This method will attempt to find the global minima predicted by the
Gaussian process, but it is possible for it to become stuck in local
minima of the predicted cost landscape.
This method does not return any values, but creates the attributes
listed below.
Attributes:
predicted_best_parameters (array): The parameter values which are
predicted to yield the best results, as a 1D array.
            predicted_best_cost (float): The predicted cost at the
                `predicted_best_parameters`.
            predicted_best_uncertainty (float): The uncertainty of the
                predicted cost at `predicted_best_parameters`.
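        Example:
            A usage sketch, where `learner` is an instance that has already
            been fitted:
                learner.find_global_minima()
                best_params = learner.predicted_best_parameters
                best_cost = learner.predicted_best_cost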
'''
self.log.debug('Started search for predicted global minima.')
# Define the function to minimize when looking for the global minima.
def scaled_cost_function(scaled_parameters):
scaled_cost = self.predict_cost(
scaled_parameters,
perform_scaling=False,
return_uncertainty=False,
)
return scaled_cost
# Set bounds on parameter-space for the search. Don't constrain to the
# trust region when looking for global minima.
search_region = np.transpose([self.min_boundary, self.max_boundary])
scaled_search_region = self.params_scaler.transform(search_region.T).T
# Find the scaled parameters which minimize the cost function.
best_scaled_params = self._find_predicted_minimum(
scaled_figure_of_merit_function=scaled_cost_function,
scaled_search_region=scaled_search_region,
params_scaler=self.params_scaler,
)
# Calculate some other related values in scaled units.
scaled_cost, scaled_uncertainty = self.predict_cost(
best_scaled_params,
perform_scaling=False,
return_uncertainty=True,
)
self._predicted_best_scaled_cost = scaled_cost
self._predicted_best_scaled_uncertainty = scaled_uncertainty
# Calculate some other related values in real/unscaled units.
self.predicted_best_parameters = self.params_scaler.inverse_transform([best_scaled_params])[0]
cost, uncertainty = self.predict_cost(
self.predicted_best_parameters,
perform_scaling=True,
return_uncertainty=True,
)
self.predicted_best_cost = cost
self.predicted_best_uncertainty = uncertainty
# Store results.
self.archive_dict.update(
{
'predicted_best_parameters': self.predicted_best_parameters,
'predicted_best_scaled_cost': self._predicted_best_scaled_cost,
'predicted_best_scaled_uncertainty': self._predicted_best_scaled_uncertainty,
'predicted_best_cost': self.predicted_best_cost,
'predicted_best_uncertainty': self.predicted_best_uncertainty,
}
)
self.has_global_minima = True
self.log.debug('Predicted global minima found.')
class NeuralNetLearner(MachineLearner, mp.Process):
'''
Learner that uses a neural network for function approximation.
Args:
params_out_queue (queue): Queue for parameters sent to controller.
costs_in_queue (queue): Queue for costs.
end_event (event): Event to trigger end of learner.
Keyword Args:
update_hyperparameters (Optional [bool]): Whether the hyperparameters
used to prevent overfitting should be tuned by trying out different
values. Setting to `True` can reduce overfitting of the model, but
can slow down the fitting due to the computational cost of trying
different values. Default `False`.
nn_training_filename (Optional [str]): The name of a learner archive
from a previous optimization from which to extract past results for
use in the current optimization. If `None`, no past results will be
used. Default `None`.
nn_training_file_type (Optional [str]): File type of the training
archive. Can be `'txt'`, `'pkl'`, `'mat'`, or `None`. If set to
`None`, then the file type will be determined automatically. This
argument has no effect if `nn_training_filename` is set to `None`.
Default `None`.
        trust_region (Optional [float or array]): The trust region defines the
            maximum distance the learner will travel from the current best set
            of parameters. If None, the learner will search everywhere. If a
            float, this number must be between 0 and 1 and defines the maximum
            distance the learner will venture as a fraction of the boundaries.
            If it is an array, it must have the same size as the number of
            parameters, and the numbers define the maximum absolute distance
            that can be moved along each direction. Default None.
        default_bad_cost (Optional [float]): If a run is reported as bad and
            default_bad_cost is provided, the cost for the bad run is set to
            this default value. If default_bad_cost is None, then the worst
            cost received is used for all the bad runs. Default None.
        default_bad_uncertainty (Optional [float]): If a run is reported as bad
            and default_bad_uncertainty is provided, the uncertainty for the
            bad run is set to this default value. If default_bad_uncertainty is
            None, then the uncertainty is set to a tenth of the best to worst
            cost range. Default None.
        minimum_uncertainty (Optional [float]): The minimum uncertainty
            associated with provided costs. Must be above zero to avoid fitting
            errors. Default 1e-8.
        predict_global_minima_at_end (Optional [bool]): If True, finds the
            global minima when the learner is ended; does not if False.
            Default True.
Attributes:
all_params (array): Array containing all parameters sent to learner.
all_costs (array): Array containing all costs sent to learner.
all_uncers (array): Array containing all uncertainties sent to learner.
        scaled_costs (array): Array containing all the costs scaled to have
            zero mean and a standard deviation of 1.
        bad_run_indexs (list): List of indexes of all runs that were marked as
            bad.
best_cost (float): Minimum received cost, updated during execution.
best_params (array): Parameters of best run. (reference to element in params array).
best_index (int): index of the best cost and params.
worst_cost (float): Maximum received cost, updated during execution.
worst_index (int): index to run with worst cost.
cost_range (float): Difference between worst_cost and best_cost
        generation_num (int): Number of sets of parameters to generate each
            generation. Set to 3.
noise_level_history (list): List of noise levels found after each fit.
cost_count (int): Counter for the number of costs, parameters and uncertainties added to learner.
params_count (int): Counter for the number of parameters asked to be evaluated by the learner.
neural_net (NeuralNet): Neural net that is fitted to data and used to make predictions.
cost_scaler (StandardScaler): Scaler used to normalize the provided costs.
cost_scaler_init_index (int): The number of params to use to initialise cost_scaler.
has_trust_region (bool): Whether the learner has a trust region.
'''
_ARCHIVE_TYPE = 'neural_net_learner'
    def __init__(self,
                 update_hyperparameters=False,
                 nn_training_filename=None,
                 nn_training_file_type=None,
                 **kwargs):
if nn_training_filename is not None:
super(NeuralNetLearner,self).__init__(
training_filename=nn_training_filename,
training_file_type=nn_training_file_type,
**kwargs
)
else:
super(NeuralNetLearner,self).__init__(**kwargs)
        # Constants, limits, and tolerances.
self.num_nets = 3
self.generation_num = 3
# Parameters that should only be loaded if a training archive was
# provided and it has the same learner type and min/max boundaries.
same_learner_type = self._learner_type_matches_training_archive
same_boundaries = self._boundaries_match_training_archive
if same_learner_type and same_boundaries:
training_dict = self.training_dict
# Restore last net regularization coefficient values.
self.initial_regularizations = []
for j in range(self.num_nets):
net_dict = training_dict['net_{index}'.format(index=j)]
last_net_reg = net_dict['last_net_reg']
self.initial_regularizations.append(float(last_net_reg))
else:
# Set initial regularizations to None to let NeuralNet use its
# default value.
self.initial_regularizations = [None] * self.num_nets
# Set training file directory to None for now since no nets will be
# loaded here. The NeuralNetVisualizer will overwrite this when it needs
# to load the nets.
self.nn_training_file_dir = None
# The scaler will be initialised when we're ready to fit it
self.cost_scaler = None
self.cost_scaler_init_index = None
        # Optional user-set variables.
self.update_hyperparameters = bool(update_hyperparameters)
# Update archive.
new_values_dict = {
'archive_type': self._ARCHIVE_TYPE,
'generation_num': self.generation_num,
'update_hyperparameters': self.update_hyperparameters,
}
self.archive_dict.update(new_values_dict)
        # Remove the logger so the neural net can be safely pickled for
        # multiprocessing on Windows.
self.log = None
def _construct_net(self):
self.neural_net = [
mlnn.NeuralNet(
num_params=self.num_params,
fit_hyperparameters=self.update_hyperparameters,
learner_archive_dir=self.learner_archive_dir,
start_datetime=self.start_datetime,
regularization_coefficient=self.initial_regularizations[j],
)
for j in range(self.num_nets)
]
def _init_cost_scaler(self):
'''
Initialises the cost scaler. cost_scaler_init_index must be set.
'''
self.cost_scaler = skp.StandardScaler(with_mean=False, with_std=False)
self.cost_scaler.fit(self.all_costs[:self.cost_scaler_init_index,np.newaxis])
def create_neural_net(self):
        '''
        Create the neural nets. Must be called from the same process as
        fit_neural_net, predict_cost and predict_costs_from_param_array.
        '''
self._construct_net()
for n in self.neural_net:
n.init()
def import_neural_net(self):
        '''
        Import neural net parameters from the training dictionary provided at
        construction. Must be called from the same process as fit_neural_net,
        predict_cost and predict_costs_from_param_array. Exactly one of this
        method and create_neural_net must be called before the other methods.
        '''
if not self.training_dict:
msg = ('A training file must be provided during initialization in '
'order to import saved neural nets.')
raise ValueError(msg)
self._construct_net()
for i, n in enumerate(self.neural_net):
n.load(self.training_dict['net_' + str(i)],
extra_search_dirs=[self.nn_training_file_dir])
def _fit_neural_net(self,index):
'''
Fits a neural net to the data.
cost_scaler must have been fitted before calling this method.
'''
        self.scaled_costs = self.cost_scaler.transform(self.all_costs[:, np.newaxis])[:, 0]
self.neural_net[index].fit_neural_net(self.all_params, self.scaled_costs)
def predict_cost(
self,
params,
net_index=None,
perform_scaling=True,
):
'''
Predict the cost from the neural net for `params`.
This method is a wrapper around
`mloop.neuralnet.NeuralNet.predict_cost()`.
Args:
params (array): A 1D array containing the values for each parameter.
These should be in real/unscaled units if `perform_scaling` is
`True` or they should be in scaled units if `perform_scaling` is
`False`.
net_index (int, optional): The index of the neural net to use to
predict the cost. If `None` then a net will be randomly chosen.
Defaults to `None`.
perform_scaling (bool, optional): Whether or not the parameters and
costs should be scaled. If `True` then this method takes in
parameter values in real/unscaled units then returns a predicted
cost in real/unscaled units. If `False`, then this method takes
parameter values in scaled units and returns a cost in scaled
units. Note that this method cannot determine on its own if the
values in `params` are in real/unscaled units or scaled units;
it is up to the caller to pass the correct values. Defaults to
`True`.
Returns:
cost (float): Predicted cost for `params`. This will be in
real/unscaled units if `perform_scaling` is `True` or it will be
in scaled units if `perform_scaling` is `False`.
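        Example:
            A minimal usage sketch, where `learner` is an instance whose nets
            have already been created and fitted, and the parameter values are
            illustrative:
                cost = learner.predict_cost(np.array([0.1, 0.2]), net_index=0)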
'''
if net_index is None:
net_index = nr.randint(self.num_nets)
net = self.neural_net[net_index]
cost = net.predict_cost(params, perform_scaling=perform_scaling)
return cost
def predict_cost_gradient(
self,
params,
net_index=None,
perform_scaling=True,
):
'''
Predict the gradient of the cost function at `params`.
This method is a wrapper around
`mloop.neuralnet.NeuralNet.predict_cost_gradient()`.
Args:
params (array): A 1D array containing the values for each parameter.
These should be in real/unscaled units if `perform_scaling` is
`True` or they should be in scaled units if `perform_scaling` is
`False`.
net_index (int, optional): The index of the neural net to use to
predict the cost gradient. If `None` then a net will be randomly
chosen. Defaults to `None`.
perform_scaling (bool, optional): Whether or not the parameters and
costs should be scaled. If `True` then this method takes in
parameter values in real/unscaled units then returns a predicted
cost gradient in real/unscaled units. If `False`, then this
method takes parameter values in scaled units and returns a cost
gradient in scaled units. Note that this method cannot determine
on its own if the values in `params` are in real/unscaled units
or scaled units; it is up to the caller to pass the correct
values. Defaults to `True`.
Returns:
cost_gradient (np.float64): The predicted gradient at `params`. This
will be in real/unscaled units if `perform_scaling` is `True` or
it will be in scaled units if `perform_scaling` is `False`.
'''
if net_index is None:
net_index = nr.randint(self.num_nets)
net = self.neural_net[net_index]
cost_gradient = net.predict_cost_gradient(
params,
perform_scaling=perform_scaling,
)
# scipy.optimize.minimize() doesn't seem to like a 32-bit Jacobian, so
# convert to 64-bit.
cost_gradient = cost_gradient.astype(np.float64)
return cost_gradient
    def predict_costs_from_param_array(self, params, net_index=None):
        '''
        Produce predictions of the cost for an array of parameter sets.
        Returns:
            list of float: The predicted cost at each set of parameters.
        '''
        # TODO: Can do this more efficiently.
        return [self.predict_cost(param, net_index) for param in params]
def update_archive(self):
'''
Update the archive.
'''
super(NeuralNetLearner, self).update_archive()
        new_values_dict = {
            'cost_scaler_init_index': self.cost_scaler_init_index,
        }
        self.archive_dict.update(new_values_dict)
        if self.neural_net:
            for i, n in enumerate(self.neural_net):
                self.archive_dict.update({'net_' + str(i): n.save()})
def find_next_parameters(self, net_index=None):
'''
Get the next parameters to test.
This method searches for the parameters expected to give the minimum
cost, as predicted by a neural net.
This method additionally increments `self.params_count` appropriately.
Args:
net_index (int, optional): The index of the neural net to use to
predict the cost. If `None` then a net will be randomly chosen.
Defaults to `None`.
        Returns:
next_params (array): The next parameter values to try, stored in a
1D array.
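        Example:
            A usage sketch, where `learner` is an instance whose nets have
            already been fitted:
                next_params = learner.find_next_parameters(net_index=0)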
'''
# Set default values.
if net_index is None:
net_index = nr.randint(self.num_nets)
net = self.neural_net[net_index]
# Create functions for the search.
def scaled_cost_function(scaled_params):
scaled_cost = self.predict_cost(
scaled_params,
net_index=net_index,
perform_scaling=False,
)
return scaled_cost
def scaled_cost_jacobian_function(scaled_params):
scaled_jacobian = self.predict_cost_gradient(
scaled_params,
net_index=net_index,
perform_scaling=False,
)
return scaled_jacobian
# Get the ParameterScaler used for this neural net.
params_scaler = net._param_scaler
# Set bounds on the parameter-space for the search.
scaled_search_region = params_scaler.transform(self.search_region.T).T
# Find the scaled parameters which minimize the predicted cost function.
net.start_opt()
next_scaled_params = self._find_predicted_minimum(
scaled_figure_of_merit_function=scaled_cost_function,
scaled_search_region=scaled_search_region,
params_scaler=params_scaler,
scaled_jacobian_function=scaled_cost_jacobian_function,
)
net.stop_opt()
# Convert scaled parameters to real/unscaled units and get the predicted
# cost.
next_params = params_scaler.inverse_transform([next_scaled_params])[0]
next_cost = self.predict_cost(
next_params,
net_index=net_index,
perform_scaling=True,
)
# Increment the counter.
self.params_count += 1
# Return results.
self.log.debug(
"Suggesting params " + str(next_params) + " with predicted cost: "
+ str(next_cost)
)
return next_params
def run(self):
        '''
        Start running the neural network learner.
        When the new-parameters event is triggered, this method reads the
        provided cost information and updates the neural network with it.
        It then searches the neural network for new optimal parameters to
        test based on the predicted cost. The parameters to test next are put
        on the output parameters queue.
        '''
        # Logging to the main log file from a process (as opposed to a thread)
        # in CPython is currently buggy on Windows and/or Python 2.7. The
        # current solution is to log to the console only for warnings and
        # above from a process.
self.log = mp.log_to_stderr(logging.WARNING)
# The network needs to be created in the same process in which it runs
self.create_neural_net()
# We cycle through our different nets to generate each new set of params. This keeps track
# of the current net.
net_index = 0
try:
while not self.end_event.is_set():
self.log.debug('Learner waiting for new params event')
# TODO: Not doing this because it's slow. Is it necessary?
#self.save_archive()
self.wait_for_new_params_event()
self.log.debug('NN learner reading costs')
self.get_params_and_costs()
if self.cost_scaler_init_index is None:
self.cost_scaler_init_index = len(self.all_costs)
self._init_cost_scaler()
# Now we need to generate generation_num new param sets, by iterating over our
# nets. We want to fire off new params as quickly as possible, so we don't train a
# net until we actually need to use it. But we need to make sure that each net gets
# trained exactly once, regardless of how many times it's used to generate new
# params.
num_nets_trained = 0
for _ in range(self.generation_num):
if num_nets_trained < self.num_nets:
self._fit_neural_net(net_index)
num_nets_trained += 1
                    self.log.debug('Neural network learner generating parameter: ' + str(self.params_count + 1))
next_params = self.find_next_parameters(net_index)
net_index = (net_index + 1) % self.num_nets
self.params_out_queue.put(next_params)
if self.end_event.is_set():
raise LearnerInterrupt()
# Train any nets that haven't been trained yet.
for i in range(self.num_nets - num_nets_trained):
self._fit_neural_net((net_index + i) % self.num_nets)
except LearnerInterrupt:
pass
end_dict = {}
if self.predict_global_minima_at_end:
if not self.costs_in_queue.empty():
# There are new parameters, get them.
self.get_params_and_costs()
# TODO: Somehow support predicting minima from all nets, rather than just net 0.
self._fit_neural_net(0)
self.find_global_minima(0)
            end_dict.update({
                'predicted_best_parameters': self.predicted_best_parameters,
                'predicted_best_cost': self.predicted_best_cost,
            })
self.params_out_queue.put(end_dict)
self._shut_down()
for n in self.neural_net:
n.destroy()
self.log.debug('Ended neural network learner')
def find_global_minima(self, net_index=None):
'''
Search for the global minima predicted by the neural net.
This method will attempt to find the global minima predicted by the
neural net, but it is possible for it to become stuck in local minima of
the predicted cost landscape.
This method does not return any values, but creates the attributes
listed below.
Args:
net_index (int, optional): The index of the neural net to use to
predict the cost. If `None` then a net will be randomly chosen.
Defaults to `None`.
Attributes:
predicted_best_parameters (array): The parameter values which are
predicted to yield the best results, as a 1D array.
            predicted_best_cost (float): The predicted cost at the
                `predicted_best_parameters`.
'''
self.log.debug('Started search for predicted global minima.')
# Set default values.
if net_index is None:
net_index = nr.randint(self.num_nets)
# Call self.find_next_parameters() since that method searches for the
# predicted minimum.
self.predicted_best_parameters = self.find_next_parameters(
net_index=net_index,
)
# Get the predicted scaled/un-scaled costs at the predicted best
# parameters.
self.predicted_best_cost = self.predict_cost(
params=self.predicted_best_parameters,
net_index=net_index,
perform_scaling=True,
)
net = self.neural_net[net_index]
self._predicted_best_scaled_cost = self.predict_cost(
params=net._scale_params(self.predicted_best_parameters),
net_index=net_index,
perform_scaling=False,
)
# Store results.
self.archive_dict.update(
{
'predicted_best_parameters': self.predicted_best_parameters,
'predicted_best_scaled_cost': self._predicted_best_scaled_cost,
'predicted_best_cost': self.predicted_best_cost
}
)
self.has_global_minima = True
self.log.debug('Predicted global minima found.')
# Methods for debugging/analysis.
def get_losses(self):
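        '''
        Get the loss histories recorded by the neural nets.
        Returns:
            list: One entry per net, as returned by each net's `get_losses()`
                method.
        '''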
all_losses = []
for n in self.neural_net:
all_losses.append(n.get_losses())
return all_losses
def get_regularization_histories(self):
'''
Get the regularization coefficient values used by the nets.
Returns:
list of list of float: The values used by the neural nets for the
regularization coefficient. There is one list per net, which
includes all of the regularization coefficient values used by
that net during the optimization. If the optimization was run
with `update_hyperparameters` set to `False`, then each net's
list will only have one entry, namely the initial default value
for the regularization coefficient. If the optimization was run
                with `update_hyperparameters` set to `True`, then the list will
also include the optimal values for the regularization
coefficient determined during each hyperparameter fitting.
'''
regularization_histories = []
for net in self.neural_net:
regularization_histories.append(net.regularization_history)
return regularization_histories
|
michaelhush/M-LOOP
|
mloop/learners.py
|
Python
|
mit
| 144,110
|
[
"Gaussian"
] |
d9cb4921a794fd25c287cd7d13c58778f7cde0055de1d83d977dca9d009385cc
|
"""Utility functions for plotting M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import math
from functools import partial
import difflib
import webbrowser
import tempfile
import numpy as np
from copy import deepcopy
from ..channels.layout import _auto_topomap_coords
from ..channels.channels import _contains_ch_type
from ..defaults import _handle_default
from ..io import show_fiff, Info
from ..io.pick import channel_type, channel_indices_by_type
from ..utils import verbose, set_config, warn
from ..externals.six import string_types
from ..fixes import _get_argrelmax
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
'#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
def _setup_vmin_vmax(data, vmin, vmax, norm=False):
"""Aux function to handle vmin and vmax parameters"""
if vmax is None and vmin is None:
vmax = np.abs(data).max()
if norm:
vmin = 0.
else:
vmin = -vmax
else:
if callable(vmin):
vmin = vmin(data)
elif vmin is None:
if norm:
vmin = 0.
else:
vmin = np.min(data)
if callable(vmax):
vmax = vmax(data)
elif vmax is None:
vmax = np.max(data)
return vmin, vmax
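# A worked sketch of _setup_vmin_vmax with illustrative values: for
# data = np.array([-2., 1.]) and vmin=None, vmax=None it returns (-2.0, 2.0),
# or (0.0, 2.0) when norm=True.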
def plt_show(show=True, **kwargs):
"""Helper to show a figure while suppressing warnings"""
import matplotlib
import matplotlib.pyplot as plt
if show and matplotlib.get_backend() != 'agg':
plt.show(**kwargs)
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
""" Adjust subplot parameters to give specified padding.
Note. For plotting please use this function instead of plt.tight_layout
Parameters
----------
pad : float
        Padding between the figure edge and the edges of subplots, as a
fraction of the font-size.
h_pad : float
Padding height between edges of adjacent subplots.
Defaults to `pad_inches`.
w_pad : float
Padding width between edges of adjacent subplots.
Defaults to `pad_inches`.
fig : instance of Figure
Figure to apply changes to.
"""
import matplotlib.pyplot as plt
fig = plt.gcf() if fig is None else fig
fig.canvas.draw()
try: # see https://github.com/matplotlib/matplotlib/issues/2654
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
except Exception:
try:
fig.set_tight_layout(dict(pad=pad, h_pad=h_pad, w_pad=w_pad))
except Exception:
warn('Matplotlib function "tight_layout" is not supported.'
' Skipping subplot adjustment.')
def _check_delayed_ssp(container):
""" Aux function to be used for interactive SSP selection
"""
if container.proj is True or\
all(p['active'] for p in container.info['projs']):
raise RuntimeError('Projs are already applied. Please initialize'
' the data with proj set to False.')
elif len(container.info['projs']) < 1:
raise RuntimeError('No projs found in evoked.')
def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
"""Return a colormap similar to that used by mne_analyze
Parameters
----------
limits : list (or array) of length 3 or 6
Bounds for the colormap, which will be mirrored across zero if length
3, or completely specified (and potentially asymmetric) if length 6.
format : str
Type of colormap to return. If 'matplotlib', will return a
matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
return an RGBA array of shape (256, 4).
Returns
-------
cmap : instance of matplotlib.pyplot.colormap | array
A teal->blue->gray->red->yellow colormap.
Notes
-----
    This will return a colormap that will display correctly for data
    that are scaled by the plotting function to span [-fmax, fmax].
Examples
--------
The following code will plot a STC using standard MNE limits:
colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
"""
# Ensure limits is an array
limits = np.asarray(limits, dtype='float')
if len(limits) != 3 and len(limits) != 6:
raise ValueError('limits must have 3 or 6 elements')
if len(limits) == 3 and any(limits < 0.):
raise ValueError('if 3 elements, limits must all be non-negative')
if any(np.diff(limits) <= 0):
raise ValueError('limits must be monotonically increasing')
if format == 'matplotlib':
from matplotlib import colors
if len(limits) == 3:
limits = (np.concatenate((-np.flipud(limits), limits)) +
limits[-1]) / (2 * limits[-1])
else:
limits = (limits - np.min(limits)) / np.max(limits -
np.min(limits))
cdict = {'red': ((limits[0], 0.0, 0.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 1.0, 1.0),
(limits[5], 1.0, 1.0)),
'green': ((limits[0], 1.0, 1.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 1.0, 1.0)),
'blue': ((limits[0], 1.0, 1.0),
(limits[1], 1.0, 1.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 0.0, 0.0))}
return colors.LinearSegmentedColormap('mne_analyze', cdict)
elif format == 'mayavi':
if len(limits) == 3:
limits = np.concatenate((-np.flipud(limits), [0], limits)) /\
limits[-1]
else:
limits = np.concatenate((limits[:3], [0], limits[3:]))
limits /= np.max(np.abs(limits))
r = np.array([0, 0, 0, 0, 1, 1, 1])
g = np.array([1, 0, 0, 0, 0, 0, 1])
b = np.array([1, 1, 1, 0, 0, 0, 0])
a = np.array([1, 1, 0, 0, 0, 1, 1])
xp = (np.arange(256) - 128) / 128.0
colormap = np.r_[[np.interp(xp, limits, 255 * c)
for c in [r, g, b, a]]].T
return colormap
else:
raise ValueError('format must be either matplotlib or mayavi')
def _toggle_options(event, params):
"""Toggle options (projectors) dialog"""
import matplotlib.pyplot as plt
if len(params['projs']) > 0:
if params['fig_proj'] is None:
_draw_proj_checkbox(event, params, draw_current_state=False)
else:
# turn off options dialog
plt.close(params['fig_proj'])
del params['proj_checks']
params['fig_proj'] = None
def _toggle_proj(event, params):
"""Operation to perform when proj boxes clicked"""
# read options if possible
if 'proj_checks' in params:
bools = [x[0].get_visible() for x in params['proj_checks'].lines]
for bi, (b, p) in enumerate(zip(bools, params['projs'])):
# see if they tried to deactivate an active one
if not b and p['active']:
bools[bi] = True
else:
bools = [True] * len(params['projs'])
compute_proj = False
if 'proj_bools' not in params:
compute_proj = True
elif not np.array_equal(bools, params['proj_bools']):
compute_proj = True
# if projectors changed, update plots
if compute_proj is True:
params['plot_update_proj_callback'](params, bools)
def _get_help_text(params):
"""Aux function for customizing help dialogs text."""
text, text2 = list(), list()
text.append(u'\u2190 : \n')
text.append(u'\u2192 : \n')
text.append(u'\u2193 : \n')
text.append(u'\u2191 : \n')
text.append(u'- : \n')
text.append(u'+ or = : \n')
text.append(u'Home : \n')
text.append(u'End : \n')
text.append(u'Page down : \n')
text.append(u'Page up : \n')
text.append(u'F11 : \n')
text.append(u'? : \n')
text.append(u'Esc : \n\n')
text.append(u'Mouse controls\n')
text.append(u'click on data :\n')
text2.append('Navigate left\n')
text2.append('Navigate right\n')
text2.append('Scale down\n')
text2.append('Scale up\n')
text2.append('Toggle full screen mode\n')
text2.append('Open help box\n')
text2.append('Quit\n\n\n')
if 'raw' in params:
text2.insert(4, 'Reduce the time shown per view\n')
text2.insert(5, 'Increase the time shown per view\n')
text.append(u'click elsewhere in the plot :\n')
if 'ica' in params:
text.append(u'click component name :\n')
text2.insert(2, 'Navigate components down\n')
text2.insert(3, 'Navigate components up\n')
text2.insert(8, 'Reduce the number of components per view\n')
text2.insert(9, 'Increase the number of components per view\n')
text2.append('Mark bad channel\n')
text2.append('Vertical line at a time instant\n')
text2.append('Show topography for the component\n')
else:
text.append(u'click channel name :\n')
text2.insert(2, 'Navigate channels down\n')
text2.insert(3, 'Navigate channels up\n')
text2.insert(8, 'Reduce the number of channels per view\n')
text2.insert(9, 'Increase the number of channels per view\n')
text2.append('Mark bad channel\n')
text2.append('Vertical line at a time instant\n')
text2.append('Mark bad channel\n')
elif 'epochs' in params:
text.append(u'right click :\n')
text2.insert(4, 'Reduce the number of epochs per view\n')
text2.insert(5, 'Increase the number of epochs per view\n')
if 'ica' in params:
text.append(u'click component name :\n')
text2.insert(2, 'Navigate components down\n')
text2.insert(3, 'Navigate components up\n')
text2.insert(8, 'Reduce the number of components per view\n')
text2.insert(9, 'Increase the number of components per view\n')
text2.append('Mark component for exclusion\n')
text2.append('Vertical line at a time instant\n')
text2.append('Show topography for the component\n')
else:
text.append(u'click channel name :\n')
text.append(u'right click channel name :\n')
text2.insert(2, 'Navigate channels down\n')
text2.insert(3, 'Navigate channels up\n')
text2.insert(8, 'Reduce the number of channels per view\n')
text2.insert(9, 'Increase the number of channels per view\n')
text.insert(10, u'b : \n')
text2.insert(10, 'Toggle butterfly plot on/off\n')
text.insert(11, u'h : \n')
text2.insert(11, 'Show histogram of peak-to-peak values\n')
text2.append('Mark bad epoch\n')
text2.append('Vertical line at a time instant\n')
text2.append('Mark bad channel\n')
text2.append('Plot ERP/ERF image\n')
text.append(u'middle click :\n')
text2.append('Show channel name (butterfly plot)\n')
text.insert(11, u'o : \n')
text2.insert(11, 'View settings (orig. view only)\n')
return ''.join(text), ''.join(text2)
def _prepare_trellis(n_cells, max_col):
"""Aux function
"""
import matplotlib.pyplot as plt
if n_cells == 1:
nrow = ncol = 1
elif n_cells <= max_col:
nrow, ncol = 1, n_cells
else:
nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
axes = [axes] if ncol == nrow == 1 else axes.flatten()
for ax in axes[n_cells:]: # hide unused axes
# XXX: Previously done by ax.set_visible(False), but because of mpl
# bug, we just hide the frame.
from .topomap import _hide_frame
_hide_frame(ax)
return fig, axes
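# A worked sketch of the grid logic in _prepare_trellis with illustrative
# values: n_cells=5 with max_col=4 gives nrow=2, ncol=4, and the three unused
# trailing axes are hidden.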
def _draw_proj_checkbox(event, params, draw_current_state=True):
"""Toggle options (projectors) dialog"""
from matplotlib import widgets
projs = params['projs']
# turn on options dialog
labels = [p['desc'] for p in projs]
actives = ([p['active'] for p in projs] if draw_current_state else
[True] * len(params['projs']))
width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
height = len(projs) / 6.0 + 0.5
fig_proj = figure_nobar(figsize=(width, height))
fig_proj.canvas.set_window_title('SSP projection vectors')
params['fig_proj'] = fig_proj # necessary for proper toggling
ax_temp = fig_proj.add_axes((0, 0, 1, 1), frameon=False)
proj_checks = widgets.CheckButtons(ax_temp, labels=labels, actives=actives)
# change already-applied projectors to red
for ii, p in enumerate(projs):
if p['active'] is True:
for x in proj_checks.lines[ii]:
x.set_color('r')
# make minimal size
# pass key presses from option dialog over
proj_checks.on_clicked(partial(_toggle_proj, params=params))
params['proj_checks'] = proj_checks
# this should work for non-test cases
try:
fig_proj.canvas.draw()
fig_proj.show(warn=False)
except Exception:
pass
def _layout_figure(params):
"""Function for setting figure layout. Shared with raw and epoch plots"""
size = params['fig'].get_size_inches() * params['fig'].dpi
scroll_width = 25
hscroll_dist = 25
vscroll_dist = 10
l_border = 100
r_border = 10
t_border = 35
b_border = 40
# only bother trying to reset layout if it's reasonable to do so
if size[0] < 2 * scroll_width or size[1] < 2 * scroll_width + hscroll_dist:
return
# convert to relative units
scroll_width_x = scroll_width / size[0]
scroll_width_y = scroll_width / size[1]
vscroll_dist /= size[0]
hscroll_dist /= size[1]
l_border /= size[0]
r_border /= size[0]
t_border /= size[1]
b_border /= size[1]
# main axis (traces)
ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
ax_y = hscroll_dist + scroll_width_y + b_border
ax_height = 1.0 - ax_y - t_border
pos = [l_border, ax_y, ax_width, ax_height]
params['ax'].set_position(pos)
if 'ax2' in params:
params['ax2'].set_position(pos)
params['ax'].set_position(pos)
# vscroll (channels)
pos = [ax_width + l_border + vscroll_dist, ax_y,
scroll_width_x, ax_height]
params['ax_vscroll'].set_position(pos)
# hscroll (time)
pos = [l_border, b_border, ax_width, scroll_width_y]
params['ax_hscroll'].set_position(pos)
if 'ax_button' in params:
# options button
pos = [l_border + ax_width + vscroll_dist, b_border,
scroll_width_x, scroll_width_y]
params['ax_button'].set_position(pos)
if 'ax_help_button' in params:
pos = [l_border - vscroll_dist - scroll_width_x * 2, b_border,
scroll_width_x * 2, scroll_width_y]
params['ax_help_button'].set_position(pos)
params['fig'].canvas.draw()
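# A worked sketch of the unit conversion in _layout_figure with illustrative
# values: for an 800x600 pixel figure, scroll_width = 25 becomes about
# 25 / 800 = 0.031 of the figure width and 25 / 600 = 0.042 of its height.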
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent=' ',
read_limit=np.inf, max_str=30, verbose=None):
"""Compare the contents of two fiff files using diff and show_fiff
Parameters
----------
fname_1 : str
First file to compare.
fname_2 : str
Second file to compare.
fname_out : str | None
Filename to store the resulting diff. If None, a temporary
file will be created.
show : bool
If True, show the resulting diff in a new tab in a web browser.
indent : str
How to indent the lines.
read_limit : int
Max number of bytes of data to read from a tag. Can be np.inf
to always read all data (helps test read completion).
max_str : int
Max number of characters of string representation to print for
each tag's data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fname_out : str
The filename used for storing the diff. Could be useful for
when a temporary file is used.
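    Examples
    --------
    A minimal usage sketch (the file names here are hypothetical):
        fname_out = compare_fiff('raw_1.fif', 'raw_2.fif', show=False)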
"""
file_1 = show_fiff(fname_1, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
file_2 = show_fiff(fname_2, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
if fname_out is not None:
f = open(fname_out, 'wb')
else:
f = tempfile.NamedTemporaryFile('wb', delete=False, suffix='.html')
fname_out = f.name
with f as fid:
fid.write(diff.encode('utf-8'))
if show is True:
webbrowser.open_new_tab(fname_out)
return fname_out
def figure_nobar(*args, **kwargs):
"""Make matplotlib figure with no toolbar"""
from matplotlib import rcParams, pyplot as plt
old_val = rcParams['toolbar']
try:
rcParams['toolbar'] = 'none'
fig = plt.figure(*args, **kwargs)
# remove button press catchers (for toolbar)
cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
for key in cbs:
fig.canvas.callbacks.disconnect(key)
except Exception as ex:
raise ex
finally:
rcParams['toolbar'] = old_val
return fig
def _helper_raw_resize(event, params):
"""Helper for resizing"""
size = ','.join([str(s) for s in params['fig'].get_size_inches()])
set_config('MNE_BROWSE_RAW_SIZE', size)
_layout_figure(params)
def _plot_raw_onscroll(event, params, len_channels=None):
"""Interpret scroll events"""
if len_channels is None:
len_channels = len(params['info']['ch_names'])
orig_start = params['ch_start']
if event.step < 0:
params['ch_start'] = min(params['ch_start'] + params['n_channels'],
len_channels - params['n_channels'])
    else:  # scrolled up
params['ch_start'] = max(params['ch_start'] - params['n_channels'], 0)
if orig_start != params['ch_start']:
_channels_changed(params, len_channels)
def _channels_changed(params, len_channels):
"""Helper function for dealing with the vertical shift of the viewport."""
if params['ch_start'] + params['n_channels'] > len_channels:
params['ch_start'] = len_channels - params['n_channels']
if params['ch_start'] < 0:
params['ch_start'] = 0
params['plot_fun']()
def _plot_raw_time(value, params):
"""Deal with changed time value"""
info = params['info']
max_times = params['n_times'] / float(info['sfreq']) - params['duration']
if value > max_times:
value = params['n_times'] / info['sfreq'] - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
def _plot_raw_onkey(event, params):
"""Interpret key presses"""
import matplotlib.pyplot as plt
if event.key == 'escape':
plt.close(params['fig'])
elif event.key == 'down':
params['ch_start'] += params['n_channels']
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'up':
params['ch_start'] -= params['n_channels']
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'right':
value = params['t_start'] + params['duration']
_plot_raw_time(value, params)
params['update_fun']()
params['plot_fun']()
elif event.key == 'left':
value = params['t_start'] - params['duration']
_plot_raw_time(value, params)
params['update_fun']()
params['plot_fun']()
elif event.key in ['+', '=']:
params['scale_factor'] *= 1.1
params['plot_fun']()
elif event.key == '-':
params['scale_factor'] /= 1.1
params['plot_fun']()
elif event.key == 'pageup':
n_channels = params['n_channels'] + 1
_setup_browser_offsets(params, n_channels)
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'pagedown':
n_channels = params['n_channels'] - 1
if n_channels == 0:
return
_setup_browser_offsets(params, n_channels)
if len(params['lines']) > n_channels: # remove line from view
params['lines'][n_channels].set_xdata([])
params['lines'][n_channels].set_ydata([])
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'home':
duration = params['duration'] - 1.0
if duration <= 0:
return
params['duration'] = duration
params['hsel_patch'].set_width(params['duration'])
params['update_fun']()
params['plot_fun']()
elif event.key == 'end':
duration = params['duration'] + 1.0
if duration > params['raw'].times[-1]:
duration = params['raw'].times[-1]
params['duration'] = duration
params['hsel_patch'].set_width(params['duration'])
params['update_fun']()
params['plot_fun']()
elif event.key == '?':
_onclick_help(event, params)
elif event.key == 'f11':
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
def _mouse_click(event, params):
"""Vertical select callback"""
if event.button != 1:
return
if event.inaxes is None:
if params['n_channels'] > 100:
return
ax = params['ax']
ylim = ax.get_ylim()
pos = ax.transData.inverted().transform((event.x, event.y))
if pos[0] > params['t_start'] or pos[1] < 0 or pos[1] > ylim[0]:
return
params['label_click_fun'](pos)
# vertical scrollbar changed
if event.inaxes == params['ax_vscroll']:
ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
if params['ch_start'] != ch_start:
params['ch_start'] = ch_start
params['plot_fun']()
# horizontal scrollbar changed
elif event.inaxes == params['ax_hscroll']:
_plot_raw_time(event.xdata - params['duration'] / 2, params)
params['update_fun']()
params['plot_fun']()
elif event.inaxes == params['ax']:
params['pick_bads_fun'](event)
def _select_bads(event, params, bads):
"""Helper for selecting bad channels onpick. Returns updated bads list."""
    # Trade-off: the tolerance band below avoids selecting more than one
    # channel when drifts are present; for clean data, click on flat
    # segments rather than on peaks.
    def f(x, y):
        # band limits: mean(x) -/+ 2 * std(x), via np.subtract / np.add
        return y(np.mean(x), x.std() * 2)
lines = event.inaxes.lines
for line in lines:
ydata = line.get_ydata()
if not isinstance(ydata, list) and not np.isnan(ydata).any():
ymin, ymax = f(ydata, np.subtract), f(ydata, np.add)
if ymin <= event.ydata <= ymax:
this_chan = vars(line)['ch_name']
if this_chan in params['info']['ch_names']:
ch_idx = params['ch_start'] + lines.index(line)
if this_chan not in bads:
bads.append(this_chan)
color = params['bad_color']
line.set_zorder(-1)
else:
while this_chan in bads:
bads.remove(this_chan)
color = vars(line)['def_color']
line.set_zorder(0)
line.set_color(color)
params['ax_vscroll'].patches[ch_idx].set_color(color)
break
else:
x = np.array([event.xdata] * 2)
params['ax_vertline'].set_data(x, np.array(params['ax'].get_ylim()))
params['ax_hscroll_vertline'].set_data(x, np.array([0., 1.]))
params['vertline_t'].set_text('%0.3f' % x[0])
return bads
def _onclick_help(event, params):
"""Function for drawing help window"""
import matplotlib.pyplot as plt
text, text2 = _get_help_text(params)
width = 6
height = 5
fig_help = figure_nobar(figsize=(width, height), dpi=80)
fig_help.canvas.set_window_title('Help')
ax = plt.subplot2grid((8, 5), (0, 0), colspan=5)
ax.set_title('Keyboard shortcuts')
plt.axis('off')
ax1 = plt.subplot2grid((8, 5), (1, 0), rowspan=7, colspan=2)
ax1.set_yticklabels(list())
plt.text(0.99, 1, text, fontname='STIXGeneral', va='top', weight='bold',
ha='right')
plt.axis('off')
ax2 = plt.subplot2grid((8, 5), (1, 2), rowspan=7, colspan=3)
ax2.set_yticklabels(list())
plt.text(0, 1, text2, fontname='STIXGeneral', va='top')
plt.axis('off')
tight_layout(fig=fig_help)
# this should work for non-test cases
try:
fig_help.canvas.draw()
fig_help.show(warn=False)
except Exception:
pass
def _setup_browser_offsets(params, n_channels):
"""Aux function for computing viewport height and adjusting offsets."""
ylim = [n_channels * 2 + 1, 0]
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
params['ax'].set_yticks(params['offsets'])
params['ax'].set_ylim(ylim)
params['vsel_patch'].set_height(n_channels)
class ClickableImage(object):
"""
Display an image so you can click on it and store x/y positions.
    Takes as input an image array (any array that works with imshow,
    though it will work best with images). Displays the image and lets you
    click on it. Stores the xy coordinates of each click, so you can
    superimpose something on top of it.
Upon clicking, the x/y coordinate of the cursor will be stored in
self.coords, which is a list of (x, y) tuples.
Parameters
----------
imdata: ndarray
The image that you wish to click on for 2-d points.
**kwargs : dict
Keyword arguments. Passed to ax.imshow.
Notes
-----
.. versionadded:: 0.9.0
"""
def __init__(self, imdata, **kwargs):
"""Display the image for clicking."""
from matplotlib.pyplot import figure
self.coords = []
self.imdata = imdata
self.fig = figure()
self.ax = self.fig.add_subplot(111)
self.ymax = self.imdata.shape[0]
self.xmax = self.imdata.shape[1]
self.im = self.ax.imshow(imdata, aspect='auto',
extent=(0, self.xmax, 0, self.ymax),
picker=True, **kwargs)
self.ax.axis('off')
self.fig.canvas.mpl_connect('pick_event', self.onclick)
plt_show()
def onclick(self, event):
"""Mouse click handler.
Parameters
----------
event: matplotlib event object
The matplotlib object that we use to get x/y position.
"""
mouseevent = event.mouseevent
self.coords.append((mouseevent.xdata, mouseevent.ydata))
def plot_clicks(self, **kwargs):
"""Plot the x/y positions stored in self.coords.
Parameters
----------
**kwargs : dict
Arguments are passed to imshow in displaying the bg image.
"""
from matplotlib.pyplot import subplots
f, ax = subplots()
ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs)
xlim, ylim = [ax.get_xlim(), ax.get_ylim()]
xcoords, ycoords = zip(*self.coords)
ax.scatter(xcoords, ycoords, c='r')
ann_text = np.arange(len(self.coords)).astype(str)
for txt, coord in zip(ann_text, self.coords):
ax.annotate(txt, coord, fontsize=20, color='r')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
plt_show()
def to_layout(self, **kwargs):
"""Turn coordinates into an MNE Layout object.
Normalizes by the image you used to generate clicks
Parameters
----------
**kwargs : dict
Arguments are passed to generate_2d_layout
"""
from ..channels.layout import generate_2d_layout
coords = np.array(self.coords)
lt = generate_2d_layout(coords, bg_image=self.imdata, **kwargs)
return lt
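# Editor's example, not part of the original module: a minimal sketch of the
# ClickableImage workflow. The random array stands in for a real photo of a
# sensor montage.
def _example_clickable_image():
    rng = np.random.RandomState(0)
    im = rng.rand(100, 100)
    click = ClickableImage(im)  # click some points, then close the window
    click.plot_clicks()         # overlay the stored (x, y) positions
    return click.coords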
def _fake_click(fig, ax, point, xform='ax', button=1):
"""Helper to fake a click at a relative point within axes."""
if xform == 'ax':
x, y = ax.transAxes.transform_point(point)
elif xform == 'data':
x, y = ax.transData.transform_point(point)
else:
raise ValueError('unknown transform')
try:
fig.canvas.button_press_event(x, y, button, False, None)
except Exception: # for old MPL
fig.canvas.button_press_event(x, y, button, False)
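# Editor's example, not part of the original module: faking a left click at
# the centre of an axes, as a test might do. This assumes a Matplotlib
# version that still exposes canvas.button_press_event, which the helper
# above requires anyway.
def _example_fake_click():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    _fake_click(fig, ax, point=(0.5, 0.5), xform='ax')  # axes-relative coords
    plt.close(fig)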
def add_background_image(fig, im, set_ratios=None):
"""Add a background image to a plot.
Adds the image specified in `im` to the
figure `fig`. This is generally meant to
be done with topo plots, though it could work
for any plot.
Note: This modifies the figure and/or axes
in place.
Parameters
----------
fig: plt.figure
The figure you wish to add a bg image to.
im: ndarray
A numpy array that works with a call to
plt.imshow(im). This will be plotted
as the background of the figure.
set_ratios: None | str
Set the aspect ratio of any axes in fig
to the value in set_ratios. Defaults to None,
which does nothing to axes.
Returns
-------
    ax_im : instance of matplotlib Axes
        The created axes object corresponding to the image you added.
Notes
-----
.. versionadded:: 0.9.0
"""
if set_ratios is not None:
for ax in fig.axes:
ax.set_aspect(set_ratios)
ax_im = fig.add_axes([0, 0, 1, 1])
ax_im.imshow(im, aspect='auto')
ax_im.set_zorder(-1)
return ax_im
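# Editor's example, not part of the original module: adding a random image
# behind an existing line plot.
def _example_add_background_image():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    im = np.random.RandomState(0).rand(32, 32)
    ax_im = add_background_image(fig, im)  # drawn behind the traces (zorder -1)
    return fig, ax_im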
def _find_peaks(evoked, npeaks):
"""Helper function for finding peaks from evoked data
Returns ``npeaks`` biggest peaks as a list of time points.
"""
argrelmax = _get_argrelmax()
gfp = evoked.data.std(axis=0)
order = len(evoked.times) // 30
if order < 1:
order = 1
peaks = argrelmax(gfp, order=order, axis=0)[0]
if len(peaks) > npeaks:
max_indices = np.argsort(gfp[peaks])[-npeaks:]
peaks = np.sort(peaks[max_indices])
times = evoked.times[peaks]
if len(times) == 0:
times = [evoked.times[gfp.argmax()]]
return times
def _process_times(inst, times, n_peaks=None, few=False):
"""Helper to return a list of times for topomaps"""
if isinstance(times, string_types):
if times == "peaks":
if n_peaks is None:
n_peaks = 3 if few else 7
times = _find_peaks(inst, n_peaks)
elif times == "auto":
if n_peaks is None:
n_peaks = 5 if few else 10
times = np.linspace(inst.times[0], inst.times[-1], n_peaks)
else:
raise ValueError("Got an unrecognized method for `times`. Only "
"'peaks' and 'auto' are supported (or directly "
"passing numbers).")
elif np.isscalar(times):
times = [times]
times = np.array(times)
if times.ndim != 1:
raise ValueError('times must be 1D, got %d dimensions' % times.ndim)
if len(times) > 20:
raise RuntimeError('Too many plots requested. Please pass fewer '
'than 20 time instants.')
return times
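# Editor's example, not part of the original module: how the `times` argument
# is normalised. `evoked` is an assumption here (any mne Evoked instance).
def _example_process_times(evoked):
    t_single = _process_times(evoked, 0.1)              # -> array([0.1])
    t_peaks = _process_times(evoked, 'peaks')           # up to 7 GFP peaks
    t_auto = _process_times(evoked, 'auto', n_peaks=5)  # 5 evenly spaced times
    return t_single, t_peaks, t_auto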
def plot_sensors(info, kind='topomap', ch_type=None, title=None,
show_names=False, show=True):
"""Plot sensors positions.
Parameters
----------
info : Instance of Info
Info structure containing the channel locations.
kind : str
        Whether to plot the sensors as a topomap or in 3D. Available options
        are 'topomap' and '3d'. Defaults to 'topomap'.
ch_type : 'mag' | 'grad' | 'eeg' | 'seeg' | None
The channel type to plot. If None, then channels are chosen in the
order given above.
title : str | None
Title for the figure. If None (default), equals to
``'Sensor positions (%s)' % ch_type``.
show_names : bool
Whether to display all channel names. Defaults to False.
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
Figure containing the sensor topography.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_trans`.
.. versionadded:: 0.12.0
"""
if kind not in ['topomap', '3d']:
raise ValueError("Kind must be 'topomap' or '3d'.")
if not isinstance(info, Info):
raise TypeError('info must be an instance of Info not %s' % type(info))
ch_indices = channel_indices_by_type(info)
allowed_types = ['mag', 'grad', 'eeg', 'seeg']
if ch_type is None:
for this_type in allowed_types:
if _contains_ch_type(info, this_type):
ch_type = this_type
break
elif ch_type not in allowed_types:
raise ValueError("ch_type must be one of %s not %s!" % (allowed_types,
ch_type))
picks = ch_indices[ch_type]
if kind == 'topomap':
pos = _auto_topomap_coords(info, picks, True)
else:
pos = np.asarray([ch['loc'][:3] for ch in info['chs']])[picks]
def_colors = _handle_default('color')
ch_names = np.array(info['ch_names'])[picks]
bads = [idx for idx, name in enumerate(ch_names) if name in info['bads']]
colors = ['red' if i in bads else def_colors[channel_type(info, pick)]
for i, pick in enumerate(picks)]
title = 'Sensor positions (%s)' % ch_type if title is None else title
fig = _plot_sensors(pos, colors, ch_names, title, show_names, show)
return fig
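# Editor's example, not part of the original module: plotting the sensor
# layout from an Info structure. `info` is an assumption (an mne Info that
# carries channel locations, e.g. raw.info).
def _example_plot_sensors(info):
    fig2d = plot_sensors(info, kind='topomap', show=False)
    fig3d = plot_sensors(info, kind='3d', show=False)
    return fig2d, fig3d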
def _onpick_sensor(event, fig, ax, pos, ch_names):
"""Callback for picked channel in plot_sensors."""
ind = event.ind[0] # Just take the first sensor.
ch_name = ch_names[ind]
this_pos = pos[ind]
# XXX: Bug in matplotlib won't allow setting the position of existing
# text item, so we create a new one.
ax.texts.pop(0)
if len(this_pos) == 3:
ax.text(this_pos[0], this_pos[1], this_pos[2], ch_name)
else:
ax.text(this_pos[0], this_pos[1], ch_name)
fig.canvas.draw()
def _plot_sensors(pos, colors, ch_names, title, show_names, show):
"""Helper function for plotting sensors."""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from .topomap import _check_outlines, _draw_outlines
fig = plt.figure()
if pos.shape[1] == 3:
ax = Axes3D(fig)
ax = fig.gca(projection='3d')
ax.text(0, 0, 0, '', zorder=1)
ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2], picker=True, c=colors)
ax.azim = 90
ax.elev = 0
else:
ax = fig.add_subplot(111)
ax.text(0, 0, '', zorder=1)
ax.set_xticks([])
ax.set_yticks([])
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None,
hspace=None)
pos, outlines = _check_outlines(pos, 'head')
_draw_outlines(ax, outlines)
ax.scatter(pos[:, 0], pos[:, 1], picker=True, c=colors)
if show_names:
for idx in range(len(pos)):
this_pos = pos[idx]
if pos.shape[1] == 3:
ax.text(this_pos[0], this_pos[1], this_pos[2], ch_names[idx])
else:
ax.text(this_pos[0], this_pos[1], ch_names[idx])
else:
picker = partial(_onpick_sensor, fig=fig, ax=ax, pos=pos,
ch_names=ch_names)
fig.canvas.mpl_connect('pick_event', picker)
fig.suptitle(title)
plt_show(show)
return fig
def _compute_scalings(scalings, inst):
"""Compute scalings for each channel type automatically.
Parameters
----------
scalings : dict
The scalings for each channel type. If any values are
'auto', this will automatically compute a reasonable
scaling for that channel type. Any values that aren't
'auto' will not be changed.
inst : instance of Raw or Epochs
The data for which you want to compute scalings. If data
is not preloaded, this will read a subset of times / epochs
up to 100mb in size in order to compute scalings.
Returns
-------
scalings : dict
A scalings dictionary with updated values
"""
from ..io.base import _BaseRaw
from ..epochs import _BaseEpochs
if not isinstance(inst, (_BaseRaw, _BaseEpochs)):
raise ValueError('Must supply either Raw or Epochs')
if scalings is None:
# If scalings is None just return it and do nothing
return scalings
ch_types = channel_indices_by_type(inst.info)
ch_types = dict([(i_type, i_ixs)
for i_type, i_ixs in ch_types.items() if len(i_ixs) != 0])
if scalings == 'auto':
# If we want to auto-compute everything
scalings = dict((i_type, 'auto') for i_type in ch_types.keys())
if not isinstance(scalings, dict):
raise ValueError('scalings must be a dictionary of ch_type: val pairs,'
' not type %s ' % type(scalings))
scalings = deepcopy(scalings)
if inst.preload is False:
if isinstance(inst, _BaseRaw):
# Load a window of data from the center up to 100mb in size
n_times = 1e8 // (len(inst.ch_names) * 8)
n_times = np.clip(n_times, 1, inst.n_times)
n_secs = n_times / float(inst.info['sfreq'])
time_middle = np.mean(inst.times)
tmin = np.clip(time_middle - n_secs / 2., inst.times.min(), None)
tmax = np.clip(time_middle + n_secs / 2., None, inst.times.max())
data = inst._read_segment(tmin, tmax)
elif isinstance(inst, _BaseEpochs):
# Load a random subset of epochs up to 100mb in size
n_epochs = 1e8 // (len(inst.ch_names) * len(inst.times) * 8)
n_epochs = int(np.clip(n_epochs, 1, len(inst)))
ixs_epochs = np.random.choice(range(len(inst)), n_epochs, False)
inst = inst.copy()[ixs_epochs].load_data()
else:
data = inst._data
if isinstance(inst, _BaseEpochs):
data = inst._data.reshape([len(inst.ch_names), -1])
    # Iterate through ch types and update scaling if 'auto'
for key, value in scalings.items():
if value != 'auto':
continue
if key not in ch_types.keys():
raise ValueError("Sensor {0} doesn't exist in data".format(key))
this_data = data[ch_types[key]]
scale_factor = np.percentile(this_data.ravel(), [0.5, 99.5])
scale_factor = np.max(np.abs(scale_factor))
scalings[key] = scale_factor
return scalings
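# Editor's example, not part of the original module: asking for automatic
# scalings on a Raw instance. `raw` is an assumption (any mne Raw object).
def _example_compute_scalings(raw):
    # 'auto' expands to {'mag': 'auto', 'eeg': 'auto', ...} for the channel
    # types present, then each value is replaced by a robust scale factor
    # (the larger absolute bound of the central 99% of the data).
    return _compute_scalings('auto', raw)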
| ARudiuk/mne-python | mne/viz/utils.py | Python | bsd-3-clause | 40,089 | ["Mayavi"] | 5c59bb0a6ce01ae3eae00cb12490830a7712375178636d9912b162e98959ce73 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.dustpedia.data.seds Contains the SEDFetcher class.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ...core.basics.configurable import Configurable
from ..core.photometry import DustPediaPhotometry
from ...core.tools import filesystem as fs
from ..core.sample import DustPediaSample
from ...core.basics.log import log
# -----------------------------------------------------------------
class SEDFetcher(Configurable):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
        :param kwargs:
"""
# Call the constructor of the base class
super(SEDFetcher, self).__init__(*args, **kwargs)
# The DustPediaSample object
self.sample = DustPediaSample()
# The DustPediaPhotometry object
self.photometry = DustPediaPhotometry()
# The SED
self.sed = None
# The NGC ID
self.ngc_name = None
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# 2. Get the SED
self.get_sed()
# 3. Write
if self.config.write: self.write()
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup of the base class
super(SEDFetcher, self).setup(**kwargs)
# Get the NGC ID
if "ngc_name" in kwargs: self.ngc_name = kwargs.pop("ngc_name")
else: self.ngc_name = self.sample.get_name(self.config.galaxy_name)
# -----------------------------------------------------------------
def get_sed(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Getting the SED ...")
# Get the SED for the galaxy
self.sed = self.photometry.get_sed(self.ngc_name, add_iras=self.config.iras, add_planck=self.config.planck)
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write the SED
self.write_sed()
# -----------------------------------------------------------------
def write_sed(self):
"""
This function ...
:return:
"""
# Determine the path
path = fs.join(self.config.path, self.config.galaxy_name + ".dat")
# Save the SED
self.sed.saveto(path)
# -----------------------------------------------------------------
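# Editor's sketch, not part of the original module: driving the fetcher end
# to end. It assumes the usual PTS Configurable entry point ``run`` (which
# invokes ``setup`` and ``_run``) and a configuration object carrying
# galaxy_name, iras, planck, write and path; both are assumptions, not
# confirmed by this file. The NGC name is a hypothetical example.
def _example_fetch_sed(config):
    fetcher = SEDFetcher(config)
    fetcher.run(ngc_name="NGC3031")
    return fetcher.sed
# -----------------------------------------------------------------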
| SKIRT/PTS | dustpedia/data/seds.py | Python | agpl-3.0 | 3,270 | ["Galaxy"] | a6a5504eecc9082c934c65cd71dd7320e4840390605ba8600b2f24cac2ff3653 |
"""
Tests for coloring methods of molecule representations.
"""
from VMD import molecule as _molecule, molrep as _molrep
from pyvmd.representations import (COLOR_BACKBONE, COLOR_BETA, COLOR_CHAIN, COLOR_CHARGE, COLOR_COLOR,
COLOR_CONFORMATION, COLOR_ELEMENT, COLOR_FRAGMENT, COLOR_INDEX, COLOR_MASS,
COLOR_MOLECULE, COLOR_NAME, COLOR_OCCUPANCY, COLOR_PHYSICAL_TIME, COLOR_POS,
COLOR_POS_X, COLOR_POS_Y, COLOR_POS_Z, COLOR_RESID, COLOR_RESNAME, COLOR_RESTYPE,
COLOR_SEGNAME, COLOR_STRUCTURE, COLOR_THROB, COLOR_TIMESTEP, COLOR_TYPE, COLOR_USER,
COLOR_USER_2, COLOR_USER_3, COLOR_USER_4, COLOR_VELOCITY, COLOR_VOLUME,
Representation)
from .utils import data, PyvmdTestCase
class TestColoringMethods(PyvmdTestCase):
"""
Test coloring methods are defined correctly.
"""
def setUp(self):
self.molid = _molecule.load('psf', data('water.psf'), 'pdb', data('water.pdb'))
self.rep = Representation('rep0')
self.color = self.rep.color
def test_name(self):
self.color.method = COLOR_NAME
self.assertEqual(self.color.method, COLOR_NAME)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Name')
def test_type(self):
self.color.method = COLOR_TYPE
self.assertEqual(self.color.method, COLOR_TYPE)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Type')
def test_element(self):
self.color.method = COLOR_ELEMENT
self.assertEqual(self.color.method, COLOR_ELEMENT)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Element')
def test_resname(self):
self.color.method = COLOR_RESNAME
self.assertEqual(self.color.method, COLOR_RESNAME)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'ResName')
def test_restype(self):
self.color.method = COLOR_RESTYPE
self.assertEqual(self.color.method, COLOR_RESTYPE)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'ResType')
def test_resid(self):
self.color.method = COLOR_RESID
self.assertEqual(self.color.method, COLOR_RESID)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'ResID')
def test_chain(self):
self.color.method = COLOR_CHAIN
self.assertEqual(self.color.method, COLOR_CHAIN)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Chain')
def test_segname(self):
self.color.method = COLOR_SEGNAME
self.assertEqual(self.color.method, COLOR_SEGNAME)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'SegName')
def test_conformation(self):
self.color.method = COLOR_CONFORMATION
self.assertEqual(self.color.method, COLOR_CONFORMATION)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Conformation')
def test_molecule(self):
self.color.method = COLOR_MOLECULE
self.assertEqual(self.color.method, COLOR_MOLECULE)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Molecule')
def test_structure(self):
self.color.method = COLOR_STRUCTURE
self.assertEqual(self.color.method, COLOR_STRUCTURE)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Structure')
def test_color(self):
self.color.method = COLOR_COLOR
self.assertEqual(self.color.method, COLOR_COLOR)
self.assertEqual(self.color.get_parameters(), {'color': 1})
self.assertEqual(_molrep.get_color(self.molid, 0), 'ColorID 1')
self.color.set_parameters(color=3)
self.assertEqual(self.color.method, COLOR_COLOR)
self.assertEqual(self.color.get_parameters(), {'color': 3})
self.assertEqual(_molrep.get_color(self.molid, 0), 'ColorID 3')
def test_beta(self):
self.color.method = COLOR_BETA
self.assertEqual(self.color.method, COLOR_BETA)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Beta')
def test_occupancy(self):
self.color.method = COLOR_OCCUPANCY
self.assertEqual(self.color.method, COLOR_OCCUPANCY)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Occupancy')
def test_mass(self):
self.color.method = COLOR_MASS
self.assertEqual(self.color.method, COLOR_MASS)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Mass')
def test_charge(self):
self.color.method = COLOR_CHARGE
self.assertEqual(self.color.method, COLOR_CHARGE)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Charge')
def test_pos(self):
self.color.method = COLOR_POS
self.assertEqual(self.color.method, COLOR_POS)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Pos')
def test_pos_x(self):
self.color.method = COLOR_POS_X
self.assertEqual(self.color.method, COLOR_POS_X)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'PosX')
def test_pos_y(self):
self.color.method = COLOR_POS_Y
self.assertEqual(self.color.method, COLOR_POS_Y)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'PosY')
def test_pos_z(self):
self.color.method = COLOR_POS_Z
self.assertEqual(self.color.method, COLOR_POS_Z)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'PosZ')
def test_user(self):
self.color.method = COLOR_USER
self.assertEqual(self.color.method, COLOR_USER)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'User')
def test_user_2(self):
self.color.method = COLOR_USER_2
self.assertEqual(self.color.method, COLOR_USER_2)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'User2')
def test_user_3(self):
self.color.method = COLOR_USER_3
self.assertEqual(self.color.method, COLOR_USER_3)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'User3')
def test_user_4(self):
self.color.method = COLOR_USER_4
self.assertEqual(self.color.method, COLOR_USER_4)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'User4')
def test_fragment(self):
self.color.method = COLOR_FRAGMENT
self.assertEqual(self.color.method, COLOR_FRAGMENT)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Fragment')
def test_index(self):
self.color.method = COLOR_INDEX
self.assertEqual(self.color.method, COLOR_INDEX)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Index')
def test_backbone(self):
self.color.method = COLOR_BACKBONE
self.assertEqual(self.color.method, COLOR_BACKBONE)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Backbone')
def test_throb(self):
self.color.method = COLOR_THROB
self.assertEqual(self.color.method, COLOR_THROB)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Throb')
def test_physical_time(self):
self.color.method = COLOR_PHYSICAL_TIME
self.assertEqual(self.color.method, COLOR_PHYSICAL_TIME)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'PhysicalTime')
def test_timestep(self):
self.color.method = COLOR_TIMESTEP
self.assertEqual(self.color.method, COLOR_TIMESTEP)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Timestep')
def test_velocity(self):
self.color.method = COLOR_VELOCITY
self.assertEqual(self.color.method, COLOR_VELOCITY)
self.assertEqual(self.color.get_parameters(), {})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Velocity')
def test_volume(self):
self.color.method = COLOR_VOLUME
self.assertEqual(self.color.method, COLOR_VOLUME)
self.assertEqual(self.color.get_parameters(), {'volume_id': 0})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Volume 0')
self.color.set_parameters(volume_id=42)
self.assertEqual(self.color.method, COLOR_VOLUME)
self.assertEqual(self.color.get_parameters(), {'volume_id': 42})
self.assertEqual(_molrep.get_color(self.molid, 0), 'Volume 42')
| ziima/pyvmd | pyvmd/tests/test_coloring_methods.py | Python | gpl-3.0 | 9,871 | ["VMD"] | d1bbb97beceda88e1a8c979f24c592f7921967c76955ffb2a2d4d1ca8cb8a83a |
# Example of ymport.unv function and also export.VTKExporter.exportFacetsAsMesh
# Reads facets from shell.unv and saves them as one mesh to vtk file
from yade import ymport,export
f,n,ct = ymport.unv('shell.unv',returnConnectivityTable=True)
O.bodies.append(f)
vtk = export.VTKExporter('test')
vtk.exportFacetsAsMesh(connectivityTable=ct)
| yade/trunk | examples/test/unv-read/unvReadVTKExport.py | Python | gpl-2.0 | 341 | ["VTK"] | f617d94eaf328c495135b66d95c5706399f6bdcb398e403099f0fd5809c640e1 |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 15:31:31 2015
@author: hanbre
"""
from __future__ import print_function
import sys
import numpy as np
import pandas as pd
import xray
import datetime
import netCDF4
from mpl_toolkits.basemap import Basemap
import matplotlib
from matplotlib.pylab import *
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
import seaborn as sns
from IPython import embed
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
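# Editor's example, not part of the original script: MidpointNormalize maps
# vmin -> 0, midpoint -> 0.5 and vmax -> 1, which centres a diverging
# colormap (e.g. 'seismic') on the midpoint.
def _example_midpoint_norm():
    norm = MidpointNormalize(vmin=-2.0, vmax=6.0, midpoint=0.0)
    return norm([-2.0, 0.0, 6.0])  # -> masked array [0.0, 0.5, 1.0]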
def read_data(id_in):
data = xray.open_dataset(id_in)
return data
def plotter(vm,x,y):
#fig=figure()
print('plotter')
xx,yy=np.meshgrid(x,y)
if shape(xx)!=shape(vm):
vm=vm.transpose()
gases = ['O3','HCL','CL','CLY','']
if var in gases:
CF = contourf(x,y,vm,linspace(np.amin(vm.values),np.amax(vm.values),10),cmap=matplotlib.cm.jet)
CS=contour(x, y, vm,linspace(np.amin(vm.values),np.amax(vm.values),10),colors='k')
elif var == 'T':
CF = contourf(x,y,vm,linspace(np.amin(vm.values),400,10),cmap=matplotlib.cm.jet)
CS=contour(x, y, vm,linspace(np.amin(vm.values),400,10),colors='k')
else:
norm = MidpointNormalize(midpoint=0)
CF=contourf(x,y,vm,np.linspace(np.amin(vm.values),np.amax(vm.values),1000),norm=norm,cmap='seismic')
CS=contour(x, y, vm,10,colors='k')
xlabel(x.units);ylabel(y.units)
clb = colorbar(CF); clb.set_label('('+v.units+')')
#title=('{0} at {1}={2} and {3}={4}'.format(var,getattr(v,pvar1)[p1],getattr(v,pvar1)[p1].values,getattr(v,pvar2)[p2],getattr(v,pvar2)[p2].values))
#close(fig)
return
def meaner(v,mvars):
vm = v.mean(dim=mvars)
return vm
def pointextr(v,pvar1,p1,pvar2,p2,pvars):
vm = v[pvars]
return vm
if __name__=='__main__':
avgall=False; bandavg=False; point=False;
    if len(sys.argv) < 5 or 'help' in sys.argv:
        print('This script takes at least 5 command line arguments; {0} were given.\n'.format(len(sys.argv)))
        print('The usage is: name of this script; path and name of the netCDF file to be analysed;')
        print('name of variable; name of x-axis; name of y-axis (time, lev, lat, lon).')
        print("The 6th argument must be 'point', 'cut' or 'band'. If 'point',")
        print('a point must be specified in the other two dimensions in the form (dim1 point1 dim2 point2).')
sys.exit()
elif len(sys.argv)==5:
avgall = True
elif len(sys.argv) > 5:
        if sys.argv[5] == 'band':
            bandavg = True
        elif sys.argv[5] == 'cut':
            point = True
        elif sys.argv[5] == 'point':
            point = True
            dim1 = sys.argv[6]
            point1 = double(sys.argv[7])
            dim2 = sys.argv[8]
            point2 = double(sys.argv[9])
else:
print( "If this script is given more than 5 command line arguments, sys.argv[5] has to be 'cut', 'point' or 'band'. Give 'help' as an argument to show help text.")
sys.exit()
id_in=sys.argv[1]; var=sys.argv[2]
ds=read_data(id_in)
| hansbrenna/NetCDF_postprocessor | plotter4.py | Python | gpl-3.0 | 3,593 | ["NetCDF"] | 24c9a0a931ed72906723b549b5fdad5d54c9f2efa465dcbad144df037eb5138d |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""BBOB noiseless testbed.
The optimisation test functions are represented as classes
:py:class:`F1` to :py:class:`F24` and :py:class:`F101` to
:py:class:`F130`.
Each of these classes has an _evalfull method which expects as argument
an array of row vectors and returns a 'noisy' and a 'noiseless' float
value.
This module implements the class :py:class:`BBOBFunction` and
sub-classes:
* :py:class:`BBOBNfreeFunction` which have all the methods common to the
classes :py:class:`F1` to :py:class:`F24`
* :py:class:`BBOBGaussFunction`, :py:class:`BBOBCauchyFunction`,
:py:class:`BBOBUniformFunction` which have methods in classes from
:py:class:`F101` to :py:class:`F130`
Module attributes:
* :py:data:`dictbbob` is a dictionary such that dictbbob[2] contains
the test function class F2 and f2 = dictbbob[2]() returns
the instance 0 of the test function that can be
called as f2([1,2,3]).
* :py:data:`nfreeIDs` == range(1,25) indices for the noiseless functions that can be
found in dictbbob
* :py:data:`noisyIDs` == range(101, 131) indices for the noisy functions that can be
found in dictbbob. We have nfreeIDs + noisyIDs == sorted(dictbbob.keys())
* :py:data:`nfreeinfos` function infos
Examples:
>>> from cocopp.eaf import bbobbenchmarks as bn
>>> for s in bn.nfreeinfos:
... print s
1: Noise-free Sphere function
2: Separable ellipsoid with monotone transformation
<BLANKLINE>
Parameter: condition number (default 1e6)
<BLANKLINE>
<BLANKLINE>
3: Rastrigin with monotone transformation separable "condition" 10
4: skew Rastrigin-Bueche, condition 10, skew-"condition" 100
5: Linear slope
6: Attractive sector function
7: Step-ellipsoid, condition 100, noise-free
8: Rosenbrock noise-free
9: Rosenbrock, rotated
10: Ellipsoid with monotone transformation, condition 1e6
11: Discus (tablet) with monotone transformation, condition 1e6
12: Bent cigar with asymmetric space distortion, condition 1e6
13: Sharp ridge
14: Sum of different powers, between x^2 and x^6, noise-free
15: Rastrigin with asymmetric non-linear distortion, "condition" 10
16: Weierstrass, condition 100
17: Schaffers F7 with asymmetric non-linear transformation, condition 10
18: Schaffers F7 with asymmetric non-linear transformation, condition 1000
19: F8F2 sum of Griewank-Rosenbrock 2-D blocks, noise-free
20: Schwefel with tridiagonal variable transformation
21: Gallagher with 101 Gaussian peaks, condition up to 1000, one global rotation, noise-free
22: Gallagher with 21 Gaussian peaks, condition up to 1000, one global rotation
23: Katsuura function
24: Lunacek bi-Rastrigin, condition 100
<BLANKLINE>
in PPSN 2008, Rastrigin part rotated and scaled
<BLANKLINE>
<BLANKLINE>
>>> f3 = bn.F3(13) # instantiate function 3 on instance 13
>>> f3.evaluate([0, 1, 2]) # also: f3([0, 1, 2]) # doctest: +ELLIPSIS
59.8733529...
>>> f3.evaluate([[0, 1, 2], [3, 4, 5]])
array([ 59.87335291, 441.17409304])
>>> print bn.instantiate(5)[1] # returns evaluation function and target
51.53
>>> print bn.nfreeIDs # list noise-free functions
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
>>> for i in bn.nfreeIDs: # evaluate all noiseless functions once
... print bn.instantiate(i)[0]([0., 0., 0., 0.]),
-77.27454592 6180022.82173 92.9877507529 92.9877507529 140.510117618 70877.9554128 -72.5505202195 33355.7924722 -339.94 4374717.49343 15631566.3487 4715481.0865 550.599783901 -17.2991756229 27.3633128519 -227.827833529 -24.3305918781 131.420159348 40.7103737427 6160.81782924 376.746889545 107.830426761 220.482266557 106.094767386
"""
# TODO: define interface for this module.
# TODO: funId is expected to be a number since it is used as rseed.
import warnings
from pdb import set_trace
import numpy as np
from math import floor as floor
from numpy import dot, linspace, diag, tile, zeros, sign, resize
from numpy.random import standard_normal as _randn # TODO: may bring confusion
from numpy.random import random as _rand # TODO: may bring confusion
import sys
sys.path.insert(0, '../../../')
"""
% VAL = BENCHMARKS(X, FUNCID)
% VAL = BENCHMARKS(X, STRFUNC)
% Input:
% X -- solution column vector or matrix of column vectors
% FUNCID -- number of function to be executed with X as input,
% by default 8.
% STRFUNC -- function as string to be executed with X as input
% Output: function value(s) of solution(s)
% Examples:
% F = BENCHMARKS([1 2 3]', 17);
% F = BENCHMARKS([1 2 3]', 'f1');
%
% NBS = BENCHMARKS()
% NBS = BENCHMARKS('FunctionIndices')
% Output:
% NBS -- array of valid benchmark function numbers,
% presumably 1:24
%
% FHS = BENCHMARKS('handles')
% Output:
% FHS -- cell array of function handles
% Examples:
% FHS = BENCHMARKS('handles');
% f = FHS{1}(x); % evaluates x on the sphere function f1
% f = feval(FHS{1}, x); % ditto
%
% see also: functions FGENERIC, BENCHMARKINFOS, BENCHMARKSNOISY
% Authors (copyright 2009): Nikolaus Hansen, Raymond Ros, Steffen Finck
% Version = 'Revision: $Revision: 1115 $'
% Last Modified: $Date: 2009-02-09 19:22:42 +0100 (Mon, 09 Feb 2009) $
% INTERFACE OF BENCHMARK FUNCTIONS
% FHS = BENCHMARKS('handles');
% FUNC = FHS{1};
%
% [FVALUE, FTRUE] = FUNC(X)
% [FVALUE, FTRUE] = FUNC(X, [], IINSTANCE)
% Input: X -- matrix of column vectors
% IINSTANCE -- instance number of the function, sets function
% instance (XOPT, FOPT, rotation matrices,...)
% up until a new number is set, or the function is
% cleared. Default is zero.
% Output: row vectors with function value for each input column
% FVALUE -- function value
% FTRUE -- noise-less, deterministic function value
% [FOPT STRFUNCTION] = FUNC('any_even_empty_string', ...)
% Output:
% FOPT -- function value at optimum
% STRFUNCTION -- not yet implemented: function description string, ID before first whitespace
% [FOPT STRFUNCTION] = FUNC('any_even_empty_string', DIM, NTRIAL)
% Sets rotation matrices and xopt depending on NTRIAL (by changing the random seed).
% Output:
% FOPT -- function value at optimum
% STRFUNCTION -- not yet implemented: function description string, ID before first whitespace
% [FOPT, XOPT] = FUNC('xopt', DIM)
% Output:
% FOPT -- function value at optimum XOPT
% XOPT -- optimal solution vector in DIM-D
% [FOPT, MATRIX] = FUNC('linearTF', DIM) % might vanish in future
% Output:
% FOPT -- function value at optimum XOPT
% MATRIX -- used transformation matrix
"""
### FUNCTION DEFINITION ###
def compute_xopt(rseed, dim):
"""Generate a random vector used as optimum argument.
Rounded by four digits, but never to zero.
"""
xopt = 8 * np.floor(1e4 * unif(dim, rseed))/1e4 - 4
idx = (xopt == 0)
xopt[idx] = -1e-5
return xopt
def compute_rotation(seed, dim):
"""Returns an orthogonal basis.
The rotation is used in several ways and in combination with
non-linear transformations. Search space rotation invariant
algorithms are not expected to be invariant under this rotation.
"""
B = np.reshape(gauss(dim * dim, seed), (dim, dim))
for i in range(dim):
for j in range(0, i):
B[i] = B[i] - dot(B[i], B[j]) * B[j]
B[i] = B[i] / (np.sum(B[i]**2) ** .5)
return B
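# Editor's example, not part of the original module: the returned basis is
# orthonormal, so B . B^T is the identity up to rounding error.
def _example_compute_rotation():
    B = compute_rotation(seed=42, dim=4)
    assert np.allclose(dot(B, B.T), np.eye(4))
    return B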
def monotoneTFosc(f):
"""Maps [-inf,inf] to [-inf,inf] with different constants
for positive and negative part.
"""
if np.isscalar(f):
if f > 0.:
f = np.log(f) / 0.1
f = np.exp(f + 0.49*(np.sin(f) + np.sin(0.79*f))) ** 0.1
elif f < 0.:
f = np.log(-f) / 0.1
f = -np.exp(f + 0.49*(np.sin(0.55*f) + np.sin(0.31*f))) ** 0.1
return f
else:
f = np.asarray(f)
g = f.copy()
idx = (f > 0)
g[idx] = np.log(f[idx]) / 0.1
g[idx] = np.exp(g[idx] + 0.49*(np.sin(g[idx]) + np.sin(0.79*g[idx]))) ** 0.1
idx = (f < 0)
g[idx] = np.log(-f[idx]) / 0.1
g[idx] = -np.exp(g[idx] + 0.49*(np.sin(0.55*g[idx]) + np.sin(0.31*g[idx]))) ** 0.1
return g
def defaultboundaryhandling(x, fac):
"""Returns a float penalty for being outside of boundaries [-5, 5]"""
xoutside = np.maximum(0., np.abs(x) - 5) * sign(x)
fpen = fac * np.sum(xoutside**2, -1) # penalty
return fpen
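# Editor's example, not part of the original module: x = 6 lies one unit
# outside [-5, 5], so with fac = 100 the penalty is 100 * 1**2 = 100.
def _example_boundary_penalty():
    return defaultboundaryhandling(np.array([6., 0., -5.]), 100.)  # -> 100.0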
def gauss(N, seed):
"""Samples N standard normally distributed numbers
being the same for a given seed
"""
r = unif(2*N, seed)
g = np.sqrt(-2 * np.log(r[:N])) * np.cos(2 * np.pi * r[N:2*N])
if np.any(g == 0.):
g[g == 0] = 1e-99
return g
def unif(N, inseed):
"""Generates N uniform numbers with starting seed."""
# initialization
inseed = np.abs(inseed)
if inseed < 1.:
inseed = 1.
rgrand = 32 * [0.]
aktseed = inseed
for i in xrange(39, -1, -1):
tmp = floor(aktseed/127773.)
aktseed = 16807. * (aktseed - tmp * 127773.) - 2836. * tmp
if aktseed < 0:
aktseed = aktseed + 2147483647.
if i < 32:
rgrand[i] = aktseed
aktrand = rgrand[0]
# sample numbers
r = int(N) * [0.]
for i in xrange(int(N)):
tmp = floor(aktseed/127773.)
aktseed = 16807. * (aktseed - tmp * 127773.) - 2836. * tmp
if aktseed < 0:
aktseed = aktseed + 2147483647.
tmp = int(floor(aktrand / 67108865.))
aktrand = rgrand[tmp]
rgrand[tmp] = aktseed
r[i] = aktrand / 2.147483647e9
r = np.asarray(r)
if (r == 0).any():
        warnings.warn('zero sampled(?), set to 1e-99')
r[r == 0] = 1e-99
return r
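# Editor's example, not part of the original module: unif is deterministic,
# so the same seed always yields the same sequence in [0, 1).
def _example_unif():
    r1 = unif(3, 7)
    r2 = unif(3, 7)
    assert (r1 == r2).all()
    return r1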
# For testing and comparing to other implementations, _myrand and _myrandn
# below are used only for sampling the noise. To use them, rename _myrand
# and _myrandn to rand and randn and comment out lines 24 and 25.
_randomnseed = 30. # warning this is a global variable...
def _myrandn(size):
"""Normal random distribution sampling.
For testing and comparing purpose.
"""
global _randomnseed
_randomnseed = _randomnseed + 1.
if _randomnseed > 1e9:
_randomnseed = 1.
res = np.reshape(gauss(np.prod(size), _randomnseed), size)
return res
_randomseed = 30. # warning this is a global variable...
def _myrand(size):
"""Uniform random distribution sampling.
For testing and comparing purpose.
"""
global _randomseed
_randomseed = _randomseed + 1
if _randomseed > 1e9:
_randomseed = 1
res = np.reshape(unif(np.prod(size), _randomseed), size)
return res
def fGauss(ftrue, beta):
"""Returns Gaussian model noisy value."""
# expects ftrue to be a np.array
popsi = np.shape(ftrue)
fval = ftrue * np.exp(beta * _randn(popsi)) # with gauss noise
tol = 1e-8
fval = fval + 1.01 * tol
idx = ftrue < tol
try:
fval[idx] = ftrue[idx]
except IndexError: # fval is a scalar
if idx:
fval = ftrue
return fval
def fUniform(ftrue, alpha, beta):
"""Returns uniform model noisy value."""
# expects ftrue to be a np.array
popsi = np.shape(ftrue)
fval = (_rand(popsi) ** beta * ftrue *
np.maximum(1., (1e9 / (ftrue + 1e-99)) ** (alpha * _rand(popsi))))
tol = 1e-8
fval = fval + 1.01 * tol
idx = ftrue < tol
try:
fval[idx] = ftrue[idx]
except IndexError: # fval is a scalar
if idx:
fval = ftrue
return fval
def fCauchy(ftrue, alpha, p):
"""Returns Cauchy model noisy value
Cauchy with median 1e3*alpha and with p=0.2, zero otherwise
P(Cauchy > 1,10,100,1000) = 0.25, 0.032, 0.0032, 0.00032
"""
# expects ftrue to be a np.array
popsi = np.shape(ftrue)
fval = ftrue + alpha * np.maximum(0., 1e3 + (_rand(popsi) < p) *
_randn(popsi) / (np.abs(_randn(popsi)) + 1e-199))
tol = 1e-8
fval = fval + 1.01 * tol
idx = ftrue < tol
try:
fval[idx] = ftrue[idx]
except IndexError: # fval is a scalar
if idx:
fval = ftrue
return fval
### CLASS DEFINITION ###
class AbstractTestFunction():
"""Abstract class for test functions.
Defines methods to be implemented in test functions which are to be
provided to method setfun of class Logger.
In particular, (a) the attribute fopt and (b) the method _evalfull.
The _evalfull method returns two values, the possibly noisy value and
the noise-free value. The latter is only meant to be for recording purpose.
"""
def __call__(self, x): # makes the instances callable
"""Returns the objective function value of argument x.
Example:
>>> from cocopp.eaf import bbobbenchmarks as bn
>>> f3 = bn.F3(13) # instantiate function 3 on instance 13
>>> f3([0, 1, 2]) # call f3, same as f3.evaluate([0, 1, 2]) # doctest: +ELLIPSIS
59.8733529...
"""
return self.evaluate(x)
def evaluate(self, x):
"""Returns the objective function value (in case noisy).
"""
return self._evalfull(x)[0]
# TODO: is it better to leave evaluate out and check for hasattr('evaluate') in ExpLogger?
def _evalfull(self, x):
"""return noisy and noise-free value, the latter for recording purpose. """
raise NotImplementedError
def getfopt(self):
"""Returns the best function value of this instance of the function."""
# TODO: getfopt error:
# import bbobbenchmarks as bb
# bb.instantiate(1)[0].getfopt()
# AttributeError: F1 instance has no attribute '_fopt'
if not hasattr(self, 'iinstance'):
raise Exception('This function class has not been instantiated yet.')
return self._fopt
def setfopt(self, fopt):
try:
self._fopt = float(fopt)
except ValueError:
raise Exception('Optimal function value must be cast-able to a float.')
fopt = property(getfopt, setfopt)
class BBOBFunction(AbstractTestFunction):
"""Abstract class of BBOB test functions.
Implements some base functions that are used by the test functions
of BBOB such as initialisations of class attributes.
"""
def __init__(self, iinstance=0, zerox=False, zerof=False, param=None, **kwargs):
"""Common initialisation.
Keyword arguments:
iinstance -- instance of the function (int)
zerox -- sets xopt to [0, ..., 0]
zerof -- sets fopt to 0
param -- parameter of the function (if applicable)
kwargs -- additional attributes
"""
# Either self.rrseed or self.funId have to be defined for BBOBFunctions
# TODO: enforce
try:
rrseed = self.rrseed
except AttributeError:
rrseed = self.funId
try:
self.rseed = rrseed + 1e4 * iinstance
except TypeError:
# rrseed AND iinstance have to be float
warnings.warn('self.rseed could not be set, reset to 1 instead.')
self.rseed = 1
self.zerox = zerox
if zerof:
self.fopt = 0.
else:
self.fopt = min(1000, max(-1000, (np.round(100*100*gauss(1, self.rseed)[0]/gauss(1, self.rseed+1)[0])/100)))
self.iinstance = iinstance
self.dim = None
self.lastshape = None
self.param = param
for i, v in kwargs.iteritems():
setattr(self, i, v)
self._xopt = None
def shape_(self, x):
# this part is common to all evaluate function
# it is assumed x are row vectors
curshape = np.shape(x)
dim = np.shape(x)[-1]
return curshape, dim
def getiinstance(self):
"""Designates the instance of the function class.
An instance in this case means a given target function value, a
given optimal argument x, and given transformations for the
function. It needs to have a string representation. Preferably
it should be a number or a string.
"""
return self._iinstance
def setiinstance(self, iinstance):
self._iinstance = iinstance
iinstance = property(getiinstance, setiinstance)
def shortstr(self):
"""Gives a short string self representation (shorter than str(self))."""
res = 'F%s' % str(self.funId)
if hasattr(self, 'param'):
res += '_p%s' % str(self.param) # NH param -> self.param
return res
def __eq__(self, obj):
return (self.funId == obj.funId
and (not hasattr(self, 'param') or self.param == obj.param))
# TODO: make this test on other attributes than param?
# def dimensionality(self, dim):
# """Return the availability of dimensionality dim."""
# return True
# GETTERS
# def getfopt(self):
# """Optimal Function Value."""
# return self._fopt
# fopt = property(getfopt)
def _setxopt(self, xopt):
"""Return the argument of the optimum of the function."""
self._xopt = xopt
def _getxopt(self):
"""Return the argument of the optimum of the function."""
if self._xopt is None:
warnings.warn('You need to evaluate object to set dimension first.')
return self._xopt
xopt = property(_getxopt, _setxopt)
# def getrange(self):
# """Return the domain of the function."""
# #TODO: could depend on the dimension
# # TODO: return exception NotImplemented yet
# pass
# range = property(getrange)
# def getparam(self):
# """Optional parameter value."""
# return self._param
# param = property(getparam)
# def getitrial(self):
# """Instance id number."""
# return self._itrial
# itrial = property(getitrial)
# def getlinearTf(self):
# return self._linearTf
# linearTf = property(getlinearTf)
# def getrotation(self):
# return self._rotation
# rotation = property(getrotation)
class BBOBNfreeFunction(BBOBFunction):
"""Class of the noise-free functions of BBOB."""
def noise(self, ftrue):
"""Returns the noise-free function values."""
return ftrue.copy()
class BBOBGaussFunction(BBOBFunction):
"""Class of the Gauss noise functions of BBOB.
Attribute gaussbeta needs to be defined by inheriting classes.
"""
# gaussbeta = None
def noise(self, ftrue):
"""Returns the noisy function values."""
return fGauss(ftrue, self.gaussbeta)
def boundaryhandling(self, x):
return defaultboundaryhandling(x, 100.)
class BBOBUniformFunction(BBOBFunction, object):
"""Class of the uniform noise functions of BBOB.
Attributes unifalphafac and unifbeta need to be defined by inheriting
classes.
"""
# unifalphafac = None
# unifbeta = None
def noise(self, ftrue):
"""Returns the noisy function values."""
return fUniform(ftrue, self.unifalphafac * (0.49 + 1. / self.dim), self.unifbeta)
def boundaryhandling(self, x):
return defaultboundaryhandling(x, 100.)
class BBOBCauchyFunction(BBOBFunction):
"""Class of the Cauchy noise functions of BBOB.
Attributes cauchyalpha and cauchyp need to be defined by inheriting
classes.
"""
# cauchyalpha = None
# cauchyp = None
def noise(self, ftrue):
"""Returns the noisy function values."""
return fCauchy(ftrue, self.cauchyalpha, self.cauchyp)
def boundaryhandling(self, x):
return defaultboundaryhandling(x, 100.)
class _FSphere(BBOBFunction):
"""Abstract Sphere function.
Method boundaryhandling needs to be defined.
"""
rrseed = 1
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
fadd = fadd + self.boundaryhandling(x)
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
# COMPUTATION core
ftrue = np.sum(x**2, -1)
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F1(_FSphere, BBOBNfreeFunction):
"""Noise-free Sphere function"""
funId = 1
def boundaryhandling(self, x):
return 0.
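# Editor's example, not part of the original module: instantiating the
# noise-free sphere and evaluating it; the value is fopt plus the squared
# distance of x to the instance's optimum xopt.
def _example_f1():
    f1 = F1(iinstance=1)
    return f1([0., 0., 0.])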
class F101(_FSphere, BBOBGaussFunction):
"""Sphere with moderate Gauss noise"""
funId = 101
gaussbeta = 0.01
class F102(_FSphere, BBOBUniformFunction):
"""Sphere with moderate uniform noise"""
funId = 102
unifalphafac = 0.01
unifbeta = 0.01
class F103(_FSphere, BBOBCauchyFunction):
"""Sphere with moderate Cauchy noise"""
funId = 103
cauchyalpha = 0.01
cauchyp = 0.05
class F107(_FSphere, BBOBGaussFunction):
"""Sphere with Gauss noise"""
funId = 107
gaussbeta = 1.
class F108(_FSphere, BBOBUniformFunction):
"""Sphere with uniform noise"""
funId = 108
unifalphafac = 1.
unifbeta = 1.
class F109(_FSphere, BBOBCauchyFunction):
"""Sphere with Cauchy noise"""
funId = 109
cauchyalpha = 1.
cauchyp = 0.2
class F2(BBOBNfreeFunction):
"""Separable ellipsoid with monotone transformation
Parameter: condition number (default 1e6)
"""
funId = 2
paramValues = (1e0, 1e6)
condition = 1e6
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
if hasattr(self, 'param') and self.param: # not self.param is None
tmp = self.param
else:
tmp = self.condition
self.scales = tmp ** linspace(0, 1, dim)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
# COMPUTATION core
ftrue = dot(monotoneTFosc(x)**2, self.scales)
fval = self.noise(ftrue) # without noise
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F3(BBOBNfreeFunction):
"""Rastrigin with monotone transformation separable "condition" 10"""
funId = 3
condition = 10.
beta = 0.2
def initwithsize(self, curshape, dim):
# DIM-dependent initialisation
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
self.arrscales = resize(self.scales, curshape)
self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt
x = monotoneTFosc(x)
idx = (x > 0)
x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))
x = self.arrscales * x
# COMPUTATION core
ftrue = 10 * (self.dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)
fval = self.noise(ftrue) # without noise
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F4(BBOBNfreeFunction):
"""skew Rastrigin-Bueche, condition 10, skew-"condition" 100"""
funId = 4
condition = 10.
alpha = 100.
maxindex = np.inf # 1:2:min(DIM,maxindex) are the skew variables
rrseed = 3
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.xopt[:min(dim, self.maxindex):2] = abs(self.xopt[:min(dim, self.maxindex):2])
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
self.arrscales = resize(self.scales, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
xoutside = np.maximum(0., np.abs(x) - 5) * sign(x)
fpen = 1e2 * np.sum(xoutside**2, -1) # penalty
fadd = fadd + fpen # self.fadd becomes an array
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # shift optimum to zero
x = monotoneTFosc(x)
try:
tmpx = x[:, :min(self.dim, self.maxindex):2] # tmpx is a reference to a part of x
except IndexError:
tmpx = x[:min(self.dim, self.maxindex):2] # tmpx is a reference to a part of x
tmpx[tmpx > 0] = self.alpha ** .5 * tmpx[tmpx > 0] # this modifies x
x = self.arrscales * x # scale while assuming that Xopt == 0
# COMPUTATION core
ftrue = 10 * (self.dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F5(BBOBNfreeFunction):
"""Linear slope"""
funId = 5
alpha = 100.
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim) # TODO: what happens here?
else:
self.xopt = 5 * sign(compute_xopt(self.rseed, dim))
self.scales = -sign(self.xopt) * (self.alpha ** .5) ** linspace(0, 1, dim)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
fadd = fadd + 5 * np.sum(np.abs(self.scales))
# BOUNDARY HANDLING
# move "too" good coordinates back into domain
x = np.array(x) # convert x and make a copy of x.
#The following may modify x directly.
idx_out_of_bounds = (x * self.arrxopt) > 25 # 25 == 5 * 5
x[idx_out_of_bounds] = sign(x[idx_out_of_bounds]) * 5
# TRANSFORMATION IN SEARCH SPACE
# COMPUTATION core
ftrue = dot(x, self.scales)
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F6(BBOBNfreeFunction):
"""Attractive sector function"""
funId = 6
condition = 10.
alpha = 100.
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
# decouple scaling from function definition
self.linearTF = dot(self.linearTF, self.rotation)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
x = dot(x, self.linearTF) # TODO: check
# COMPUTATION core
idx = (x * self.arrxopt) > 0
x[idx] = self.alpha * x[idx]
ftrue = monotoneTFosc(np.sum(x**2, -1)) ** .9
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class _FStepEllipsoid(BBOBFunction):
"""Abstract Step-ellipsoid, condition 100
Method boundaryhandling needs to be defined.
"""
rrseed = 7
condition = 100.
alpha = 10.
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = self.condition ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim),
diag(((self.condition/10.)**.5) ** linspace(0, 1, dim)))
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
fadd = fadd + self.boundaryhandling(x)
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
x = dot(x, self.linearTF)
try:
x1 = x[:, 0]
except IndexError:
x1 = x[0]
idx = np.abs(x) > .5
x[idx] = np.round(x[idx])
        x[~idx] = np.round(self.alpha * x[~idx]) / self.alpha  # ~ is element-wise logical NOT (np.negative on a bool mask raises in modern numpy)
x = dot(x, self.rotation)
# COMPUTATION core
ftrue = .1 * np.maximum(1e-4 * np.abs(x1), dot(x ** 2, self.scales))
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F7(_FStepEllipsoid, BBOBNfreeFunction):
"""Step-ellipsoid, condition 100, noise-free"""
funId = 7
def boundaryhandling(self, x):
return defaultboundaryhandling(x, 1.)
class F113(_FStepEllipsoid, BBOBGaussFunction):
"""Step-ellipsoid with gauss noise, condition 100"""
funId = 113
gaussbeta = 1.
class F114(_FStepEllipsoid, BBOBUniformFunction):
"""Step-ellipsoid with uniform noise, condition 100"""
funId = 114
unifalphafac = 1.
unifbeta = 1.
class F115(_FStepEllipsoid, BBOBCauchyFunction):
"""Step-ellipsoid with Cauchy noise, condition 100"""
funId = 115
cauchyalpha = 1.
cauchyp = 0.2
class _FRosenbrock(BBOBFunction):
"""Abstract Rosenbrock, non-rotated
Method boundaryhandling needs to be defined.
"""
rrseed = 8
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = .75 * compute_xopt(self.rseed, dim) # different from all others
self.scales = max(1, dim ** .5 / 8.)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
fadd = fadd + self.boundaryhandling(x)
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= self.arrxopt!
x = self.scales * x
x = x + 1 # shift zero to factual optimum 1
# COMPUTATION core
try:
ftrue = (1e2 * np.sum((x[:, :-1] ** 2 - x[:, 1:]) ** 2, -1) +
np.sum((x[:, :-1] - 1.) ** 2, -1))
except IndexError:
ftrue = (1e2 * np.sum((x[:-1] ** 2 - x[1:]) ** 2) +
np.sum((x[:-1] - 1.) ** 2))
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F8(_FRosenbrock, BBOBNfreeFunction):
"""Rosenbrock noise-free"""
funId = 8
def boundaryhandling(self, x):
return 0.
class F104(_FRosenbrock, BBOBGaussFunction):
"""Rosenbrock non-rotated with moderate Gauss noise"""
funId = 104
gaussbeta = 0.01
class F105(_FRosenbrock, BBOBUniformFunction):
"""Rosenbrock non-rotated with moderate uniform noise"""
funId = 105
unifalphafac = 0.01
unifbeta = 0.01
class F106(_FRosenbrock, BBOBCauchyFunction):
"""Rosenbrock non-rotated with moderate Cauchy noise"""
funId = 106
cauchyalpha = 0.01
cauchyp = 0.05
class F110(_FRosenbrock, BBOBGaussFunction):
"""Rosenbrock non-rotated with Gauss noise"""
funId = 110
gaussbeta = 1.
class F111(_FRosenbrock, BBOBUniformFunction):
"""Rosenbrock non-rotated with uniform noise"""
funId = 111
unifalphafac = 1.
unifbeta = 1.
class F112(_FRosenbrock, BBOBCauchyFunction):
"""Rosenbrock non-rotated with Cauchy noise"""
funId = 112
cauchyalpha = 1.
cauchyp = 0.2
class F9(BBOBNfreeFunction):
"""Rosenbrock, rotated"""
funId = 9
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
scale = max(1, dim ** .5 / 8.) # nota: different from scales in F8
self.linearTF = scale * compute_rotation(self.rseed, dim)
self.xopt = np.hstack(dot(.5 * np.ones((1, dim)), self.linearTF.T)) / scale ** 2
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
# TRANSFORMATION IN SEARCH SPACE
x = dot(x, self.linearTF) + 0.5 # different from F8
# COMPUTATION core
try:
ftrue = (1e2 * np.sum((x[:, :-1] ** 2 - x[:, 1:]) ** 2, -1) +
np.sum((x[:, :-1] - 1.) ** 2, -1))
except IndexError:
ftrue = (1e2 * np.sum((x[:-1] ** 2 - x[1:]) ** 2) +
np.sum((x[:-1] - 1.) ** 2))
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class _FEllipsoid(BBOBFunction):
"""Abstract Ellipsoid with monotone transformation.
Method boundaryhandling needs to be defined.
"""
rrseed = 10
condition = 1e6
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = self.condition ** linspace(0, 1, dim)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
fadd = fadd + self.boundaryhandling(x)
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
x = dot(x, self.rotation)
x = monotoneTFosc(x)
# COMPUTATION core
ftrue = dot(x ** 2, self.scales)
try:
ftrue = np.hstack(ftrue)
except TypeError: # argument 2 to map() must support iteration
pass
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F10(_FEllipsoid, BBOBNfreeFunction):
"""Ellipsoid with monotone transformation, condition 1e6"""
funId = 10
condition = 1e6
def boundaryhandling(self, x):
return 0.
class F116(_FEllipsoid, BBOBGaussFunction):
"""Ellipsoid with Gauss noise, monotone x-transformation, condition 1e4"""
funId = 116
condition = 1e4
gaussbeta = 1.
class F117(_FEllipsoid, BBOBUniformFunction):
"""Ellipsoid with uniform noise, monotone x-transformation, condition 1e4"""
funId = 117
condition = 1e4
unifalphafac = 1.
unifbeta = 1.
class F118(_FEllipsoid, BBOBCauchyFunction):
"""Ellipsoid with Cauchy noise, monotone x-transformation, condition 1e4"""
funId = 118
condition = 1e4
cauchyalpha = 1.
cauchyp = 0.2
class F11(BBOBNfreeFunction):
"""Discus (tablet) with monotone transformation, condition 1e6"""
funId = 11
condition = 1e6
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
x = dot(x, self.rotation)
x = monotoneTFosc(x)
# COMPUTATION core
try:
ftrue = np.sum(x**2, -1) + (self.condition - 1.) * x[:, 0] ** 2
except IndexError:
ftrue = np.sum(x**2) + (self.condition - 1.) * x[0] ** 2
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F12(BBOBNfreeFunction):
"""Bent cigar with asymmetric space distortion, condition 1e6"""
funId = 12
condition = 1e6
beta = .5
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed + 1e6, dim) # different from others
self.rotation = compute_rotation(self.rseed + 1e6, dim)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
x = dot(x, self.rotation) # no scaling here, because it would go to the arrExpo
idx = x > 0
x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))
x = dot(x, self.rotation)
# COMPUTATION core
try:
ftrue = self.condition * np.sum(x**2, -1) + (1 - self.condition) * x[:, 0] ** 2
except IndexError:
ftrue = self.condition * np.sum(x**2) + (1 - self.condition) * x[0] ** 2
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F13(BBOBNfreeFunction):
"""Sharp ridge"""
funId = 13
condition = 10.
alpha = 100. # slope
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
self.linearTF = dot(self.linearTF, self.rotation)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
x = dot(x, self.linearTF)
# COMPUTATION core
try:
ftrue = x[:, 0] ** 2 + self.alpha * np.sqrt(np.sum(x[:, 1:] ** 2, -1))
except IndexError:
ftrue = x[0] ** 2 + self.alpha * np.sqrt(np.sum(x[1:] ** 2, -1))
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class _FDiffPow(BBOBFunction):
"""Abstract Sum of different powers, between x^2 and x^6.
Method boundaryhandling needs to be defined.
"""
alpha = 4.
rrseed = 14
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
self.arrexpo = resize(2. + self.alpha * linspace(0, 1, dim), curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
fadd = fadd + self.boundaryhandling(x)
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
x = dot(x, self.rotation)
# COMPUTATION core
ftrue = np.sqrt(np.sum(np.abs(x) ** self.arrexpo, -1))
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F14(_FDiffPow, BBOBNfreeFunction):
"""Sum of different powers, between x^2 and x^6, noise-free"""
funId = 14
def boundaryhandling(self, x):
return 0.
class F119(_FDiffPow, BBOBGaussFunction):
"""Sum of different powers with Gauss noise, between x^2 and x^6"""
funId = 119
gaussbeta = 1.
class F120(_FDiffPow, BBOBUniformFunction):
"""Sum of different powers with uniform noise, between x^2 and x^6"""
funId = 120
unifalphafac = 1.
unifbeta = 1.
class F121(_FDiffPow, BBOBCauchyFunction):
"""Sum of different powers with seldom Cauchy noise, between x^2 and x^6"""
funId = 121
cauchyalpha = 1.
cauchyp = 0.2
class F15(BBOBNfreeFunction):
"""Rastrigin with asymmetric non-linear distortion, "condition" 10"""
funId = 15
condition = 10.
beta = 0.2
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
# decouple scaling from function definition
self.linearTF = dot(self.linearTF, self.rotation)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
x = dot(x, self.rotation) # no scaling here, because it would go to the arrexpo
x = monotoneTFosc(x)
idx = x > 0.
x[idx] = x[idx] ** (1. + self.arrexpo[idx] * np.sqrt(x[idx])) # smooth in zero
x = dot(x, self.linearTF)
# COMPUTATION core
ftrue = 10. * (dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F16(BBOBNfreeFunction):
"""Weierstrass, condition 100"""
funId = 16
condition = 100.
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (1. / self.condition ** .5) ** linspace(0, 1, dim) # CAVE?
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
# decouple scaling from function definition
self.linearTF = dot(self.linearTF, self.rotation)
K = np.arange(0, 12)
self.aK = np.reshape(0.5 ** K, (1, 12))
self.bK = np.reshape(3. ** K, (1, 12))
self.f0 = np.sum(self.aK * np.cos(2 * np.pi * self.bK * 0.5)) # optimal value
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
xoutside = np.maximum(0, np.abs(x) - 5.) * sign(x)
fpen = (10. / dim) * np.sum(xoutside ** 2, -1)
fadd = fadd + fpen
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
x = dot(x, self.rotation)
x = monotoneTFosc(x)
x = dot(x, self.linearTF)
# COMPUTATION core
if len(curshape) < 2: # popsize is one
ftrue = np.sum(dot(self.aK, np.cos(dot(self.bK.T, 2 * np.pi * (np.reshape(x, (1, len(x))) + 0.5)))))
else:
ftrue = np.zeros(curshape[0]) # curshape[0] is popsize
for k, i in enumerate(x):
# TODO: simplify next line
ftrue[k] = np.sum(dot(self.aK, np.cos(dot(self.bK.T, 2 * np.pi * (np.reshape(i, (1, len(i))) + 0.5)))))
ftrue = 10. * (ftrue / dim - self.f0) ** 3
try:
ftrue = np.hstack(ftrue)
except TypeError:
pass
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class _FSchaffersF7(BBOBFunction):
"""Abstract Schaffers F7 with asymmetric non-linear transformation, condition 10
Class attribute condition and method boundaryhandling need to be defined.
"""
rrseed = 17
condition = None
beta = 0.5
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
            self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
fadd = fadd + self.boundaryhandling(x)
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
x = dot(x, self.rotation)
idx = x > 0
x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))
x = dot(x, self.linearTF)
# COMPUTATION core
try:
s = x[:, :-1] ** 2 + x[:, 1:] ** 2
except IndexError:
s = x[:-1] ** 2 + x[1:] ** 2
ftrue = np.mean(s ** .25 * (np.sin(50 * s ** .1) ** 2 + 1), -1) ** 2
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F17(_FSchaffersF7, BBOBNfreeFunction):
"""Schaffers F7 with asymmetric non-linear transformation, condition 10"""
funId = 17
condition = 10.
def boundaryhandling(self, x):
return defaultboundaryhandling(x, 10.)
class F18(_FSchaffersF7, BBOBNfreeFunction):
"""Schaffers F7 with asymmetric non-linear transformation, condition 1000"""
funId = 18
condition = 1000.
def boundaryhandling(self, x):
return defaultboundaryhandling(x, 10.)
class F122(_FSchaffersF7, BBOBGaussFunction):
"""Schaffers F7 with Gauss noise, with asymmetric non-linear transformation, condition 10"""
funId = 122
condition = 10.
gaussbeta = 1.
class F123(_FSchaffersF7, BBOBUniformFunction):
"""Schaffers F7 with uniform noise, asymmetric non-linear transformation, condition 10"""
funId = 123
condition = 10.
unifalphafac = 1.
unifbeta = 1.
class F124(_FSchaffersF7, BBOBCauchyFunction): # TODO: check boundary handling
"""Schaffers F7 with seldom Cauchy noise, asymmetric non-linear transformation, condition 10"""
funId = 124
condition = 10.
cauchyalpha = 1.
cauchyp = 0.2
class _F8F2(BBOBFunction):
"""Abstract F8F2 sum of Griewank-Rosenbrock 2-D blocks
Class attribute facftrue and method boundaryhandling need to be defined.
"""
facftrue = None
rrseed = 19
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
scale = max(1, dim ** .5 / 8.)
self.linearTF = scale * compute_rotation(self.rseed, dim)
#if self.zerox:
# self.xopt = zeros(dim) # does not work here
#else:
# TODO: clean this line
self.xopt = np.hstack(dot(self.linearTF, 0.5 * np.ones((dim, 1)) / scale ** 2))
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
fadd = fadd + self.boundaryhandling(x)
# TRANSFORMATION IN SEARCH SPACE
        x = dot(x, self.linearTF) + 0.5  # the shift replaces the usual arrxopt subtraction, cf. F9
# COMPUTATION core
try:
f2 = 100. * (x[:, :-1] ** 2 - x[:, 1:]) ** 2 + (1. - x[:, :-1]) ** 2
except IndexError:
f2 = 100. * (x[:-1] ** 2 - x[1:]) ** 2 + (1. - x[:-1]) ** 2
ftrue = self.facftrue + self.facftrue * np.sum(f2 / 4000. - np.cos(f2), -1) / (dim - 1.)
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F19(_F8F2, BBOBNfreeFunction):
"""F8F2 sum of Griewank-Rosenbrock 2-D blocks, noise-free"""
funId = 19
facftrue = 10.
def boundaryhandling(self, x):
return 0.
class F125(_F8F2, BBOBGaussFunction):
"""F8F2 sum of Griewank-Rosenbrock 2-D blocks with Gauss noise"""
funId = 125
facftrue = 1.
gaussbeta = 1.
class F126(_F8F2, BBOBUniformFunction):
"""F8F2 sum of Griewank-Rosenbrock 2-D blocks with uniform noise"""
funId = 126
facftrue = 1.
unifalphafac = 1.
unifbeta = 1.
class F127(_F8F2, BBOBCauchyFunction):
"""F8F2 sum of Griewank-Rosenbrock 2-D blocks with seldom Cauchy noise"""
funId = 127
facftrue = 1.
cauchyalpha = 1.
cauchyp = 0.2
class F20(BBOBNfreeFunction):
"""Schwefel with tridiagonal variable transformation"""
funId = 20
condition = 10.
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = 0.5 * sign(unif(dim, self.rseed) - 0.5) * 4.2096874633
self.scales = (self.condition ** .5) ** np.linspace(0, 1, dim)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(2 * np.abs(self.xopt), curshape)
self.arrscales = resize(self.scales, curshape)
self.arrsigns = resize(sign(self.xopt), curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# TRANSFORMATION IN SEARCH SPACE
x = 2 * self.arrsigns * x # makes the below boundary handling effective for coordinates
try:
x[:, 1:] = x[:, 1:] + .25 * (x[:, :-1] - self.arrxopt[:, :-1])
except IndexError:
x[1:] = x[1:] + .25 * (x[:-1] - self.arrxopt[:-1])
x = 100. * (self.arrscales * (x - self.arrxopt) + self.arrxopt)
# BOUNDARY HANDLING
xoutside = np.maximum(0., np.abs(x) - 500.) * sign(x) # in [-500, 500]
fpen = 0.01 * np.sum(xoutside ** 2, -1)
fadd = fadd + fpen
# COMPUTATION core
ftrue = 0.01 * ((418.9828872724339) - np.mean(x * np.sin(np.sqrt(np.abs(x))), -1))
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class _FGallagher(BBOBFunction):
"""Abstract Gallagher with nhighpeaks Gaussian peaks, condition up to 1000, one global rotation
    Attributes fac2, nhighpeaks, highpeakcond and method boundaryhandling
    need to be defined.
"""
rrseed = 21
maxcondition = 1000.
fitvalues = (1.1, 9.1)
fac2 = None # added: factor for xopt not too close to boundaries, used by F22
nhighpeaks = None
highpeakcond = None
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
self.rotation = compute_rotation(self.rseed, dim)
arrcondition = self.maxcondition ** linspace(0, 1, self.nhighpeaks - 1)
idx = np.argsort(unif(self.nhighpeaks - 1, self.rseed)) # random permutation
arrcondition = np.insert(arrcondition[idx], 0, self.highpeakcond)
self.arrscales = []
for i, e in enumerate(arrcondition):
s = e ** linspace(-.5, .5, dim)
idx = np.argsort(unif(dim, self.rseed + 1e3 * i)) # permutation instead of rotation
self.arrscales.append(s[idx]) # this is inverse Cov
self.arrscales = np.vstack(self.arrscales)
# compute peak values, 10 is global optimum
self.peakvalues = np.insert(linspace(self.fitvalues[0], self.fitvalues[1], self.nhighpeaks - 1), 0, 10.)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.xlocal = dot(self.fac2 * np.reshape(10. * unif(dim * self.nhighpeaks, self.rseed) - 5., (self.nhighpeaks, dim)),
self.rotation)
if self.zerox:
self.xlocal[0, :] = zeros(dim)
else:
# global optimum not too close to boundary
self.xlocal[0, :] = 0.8 * self.xlocal[0, :]
self.xopt = dot(self.xlocal[0, :], self.rotation.T)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
fadd = fadd + self.boundaryhandling(x)
# TRANSFORMATION IN SEARCH SPACE
x = dot(x, self.rotation)
# COMPUTATION core
fac = -0.5 / dim
# f = NaN(nhighpeaks, popsi)
# TODO: optimize
if len(curshape) < 2: # popsize is 1 in this case
f = np.zeros(self.nhighpeaks)
xx = tile(x, (self.nhighpeaks, 1)) - self.xlocal
f[:] = self.peakvalues * np.exp(fac * np.sum(self.arrscales * xx ** 2, 1))
elif curshape[0] < .5 * self.nhighpeaks:
f = np.zeros((curshape[0], self.nhighpeaks))
for k, e in enumerate(x):
xx = tile(e, (self.nhighpeaks, 1)) - self.xlocal
f[k, :] = self.peakvalues * np.exp(fac * np.sum(self.arrscales * xx ** 2, 1))
else:
f = np.zeros((curshape[0], self.nhighpeaks))
for i in range(self.nhighpeaks):
xx = (x - tile(self.xlocal[i, :], (curshape[0], 1)))
f[:, i] = self.peakvalues[i] * np.exp(fac * (dot(xx ** 2, self.arrscales[i, :])))
ftrue = monotoneTFosc(10 - np.max(f, -1)) ** 2
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F21(_FGallagher, BBOBNfreeFunction):
"""Gallagher with 101 Gaussian peaks, condition up to 1000, one global rotation, noise-free"""
funId = 21
nhighpeaks = 101
fac2 = 1.
highpeakcond = 1000. ** .5
def boundaryhandling(self, x):
return defaultboundaryhandling(x, 1.)
class F22(_FGallagher, BBOBNfreeFunction):
"""Gallagher with 21 Gaussian peaks, condition up to 1000, one global rotation"""
funId = 22
rrseed = 22
nhighpeaks = 21
fac2 = 0.98
highpeakcond = 1000.
def boundaryhandling(self, x):
return defaultboundaryhandling(x, 1.)
class F128(_FGallagher, BBOBGaussFunction): # TODO: check boundary handling
"""Gallagher with 101 Gaussian peaks with Gauss noise, condition up to 1000, one global rotation"""
funId = 128
nhighpeaks = 101
fac2 = 1.
highpeakcond = 1000. ** .5
gaussbeta = 1.
class F129(_FGallagher, BBOBUniformFunction):
"""Gallagher with 101 Gaussian peaks with uniform noise, condition up to 1000, one global rotation"""
funId = 129
nhighpeaks = 101
fac2 = 1.
highpeakcond = 1000. ** .5
unifalphafac = 1.
unifbeta = 1.
class F130(_FGallagher, BBOBCauchyFunction):
"""Gallagher with 101 Gaussian peaks with seldom Cauchy noise, condition up to 1000, one global rotation"""
funId = 130
nhighpeaks = 101
fac2 = 1.
highpeakcond = 1000. ** .5
cauchyalpha = 1.
cauchyp = 0.2
class F23(BBOBNfreeFunction):
"""Katsuura function"""
funId = 23
condition = 100.
arr2k = np.reshape(2. ** (np.arange(1, 33)), (1, 32)) # bug-fix for 32-bit (NH): 2 -> 2. (relevance is minor)
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
# decouple scaling from function definition
self.linearTF = dot(self.linearTF, self.rotation)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
xoutside = np.maximum(0, np.abs(x) - 5.) * sign(x)
fpen = np.sum(xoutside ** 2, -1)
fadd = fadd + fpen
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
x = dot(x, self.linearTF)
# COMPUTATION core
if len(curshape) < 2: # popsize is 1 in this case
arr = dot(np.reshape(x, (dim, 1)), self.arr2k) # dim times d array
ftrue = (-10. / dim ** 2. +
10. / dim ** 2. *
np.prod(1 + np.arange(1, dim + 1) * np.dot(np.abs(arr - np.round(arr)), self.arr2k.T ** -1.).T) ** (10. / dim ** 1.2))
else:
ftrue = zeros(curshape[0])
for k, e in enumerate(x):
arr = dot(np.reshape(e, (dim, 1)), self.arr2k) # dim times d array
ftrue[k] = (-10. / dim ** 2. +
10. / dim ** 2. *
np.prod(1 + np.arange(1, dim + 1) * np.dot(np.abs(arr - np.round(arr)), self.arr2k.T ** -1.).T) ** (10. / dim ** 1.2))
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
class F24(BBOBNfreeFunction):
"""Lunacek bi-Rastrigin, condition 100
in PPSN 2008, Rastrigin part rotated and scaled
"""
funId = 24
condition = 100.
_mu1 = 2.5
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = .5 * self._mu1 * sign(gauss(dim, self.rseed))
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
# decouple scaling from function definition
self.linearTF = dot(self.linearTF, self.rotation)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
#self.arrxopt = resize(self.xopt, curshape)
self.arrscales = resize(2. * sign(self.xopt), curshape) # makes up for xopt
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
xoutside = np.maximum(0, np.abs(x) - 5.) * sign(x)
fpen = 1e4 * np.sum(xoutside ** 2, -1)
fadd = fadd + fpen
# TRANSFORMATION IN SEARCH SPACE
x = self.arrscales * x
# COMPUTATION core
s = 1 - .5 / ((dim + 20) ** .5 - 4.1) # tested up to DIM = 160 p in [0.25,0.33]
d = 1 # shift [1,3], smaller is more difficult
mu2 = -((self._mu1 ** 2 - d) / s) ** .5
ftrue = np.minimum(np.sum((x - self._mu1) ** 2, -1),
d * dim + s * np.sum((x - mu2) ** 2, -1))
ftrue = ftrue + 10 * (dim - np.sum(np.cos(2 * np.pi * dot(x - self._mu1, self.linearTF)), -1))
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
#dictbbob = {'sphere': F1, 'ellipsoid': F2, 'Rastrigin': F3}
nfreefunclasses = (F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14,
F15, F16, F17, F18, F19, F20, F21, F22, F23, F24) # hard coded
noisyfunclasses = (F101, F102, F103, F104, F105, F106, F107, F108, F109, F110,
F111, F112, F113, F114, F115, F116, F117, F118, F119, F120,
F121, F122, F123, F124, F125, F126, F127, F128, F129, F130)
dictbbobnfree = dict((i.funId, i) for i in nfreefunclasses)
nfreeIDs = sorted(dictbbobnfree.keys()) # was: "nfreenames"
nfreeinfos = [str(i) + ': ' + dictbbobnfree[i].__doc__ for i in nfreeIDs]
dictbbobnoisy = dict((i.funId, i) for i in noisyfunclasses)
noisyIDs = sorted(dictbbobnoisy.keys()) # was noisynames
funclasses = list(nfreefunclasses) + list(noisyfunclasses)
dictbbob = dict((i.funId, i) for i in funclasses)
#TODO: pb xopt f9, 21, 22
class _FTemplate(BBOBNfreeFunction):
"""Template based on F1"""
funId = 421337
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
            self.linearTF = None  # placeholder; casing consistent with the other classes
self.rotation = None
def _evalfull(self, x):
fadd = self.fopt
curshape, dim = self.shape_(x)
# it is assumed x are row vectors
if self.lastshape != curshape:
self.initwithsize(curshape, dim)
# BOUNDARY HANDLING
# TRANSFORMATION IN SEARCH SPACE
x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
# COMPUTATION core
ftrue = np.sum(x**2, 1)
fval = self.noise(ftrue)
# FINALIZE
ftrue += fadd
fval += fadd
return fval, ftrue
def instantiate(ifun, iinstance=0, param=None, **kwargs):
"""Returns test function ifun, by default instance 0."""
res = dictbbob[ifun](iinstance=iinstance, param=param, **kwargs) # calling BBOBFunction.__init__(iinstance, param,...)
return res, res.fopt
def get_param(ifun):
"""Returns the parameter values of the function ifun."""
try:
return dictbbob[ifun].paramValues
except AttributeError:
return (None, )
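# A minimal usage sketch (illustrative only; assumes numpy is imported as np,
# as elsewhere in this module). _evalfull is the internal helper shown above;
# it returns the noisy and the noise-free value:
#
#     fun, fopt = instantiate(8)               # Rosenbrock F8, instance 0
#     fval, ftrue = fun._evalfull(np.ones(5))
#
# dictbbob maps each funId to its class, so instantiate() covers both the
# noise-free suite (1-24) and the noisy suite (101-130).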
if __name__ == "__main__":
import doctest
doctest.testmod() # run all doctests in this module
|
PyQuake/earthquakemodels
|
code/cocobbob/coco/deapbbob/bbobbenchmarks.py
|
Python
|
bsd-3-clause
| 70,666
|
[
"Gaussian"
] |
7ec6815dc9628abdb340773f4577dbf380ba5863a3bef6114e711e6791d4d934
|
"""
==================
ModEM
==================
# Generate files for ModEM
# revised by JP 2017
# revised by AK 2017 to bring across functionality from ak branch
"""
import os
import numpy as np
from mtpy.utils.mtpylog import MtPyLog
from .exception import CovarianceError
from .model import Model
try:
from evtk.hl import gridToVTK
except ImportError:
    print('If you want to write a vtk file for 3d viewing, you need to download '
          'and install evtk from https://bitbucket.org/pauloh/pyevtk')
__all__ = ['Covariance']
class Covariance(object):
"""
read and write covariance files
"""
def __init__(self, grid_dimensions=None, **kwargs):
self._logger = MtPyLog.get_mtpy_logger(self.__class__.__name__)
self.grid_dimensions = grid_dimensions
self.smoothing_east = 0.3
self.smoothing_north = 0.3
self.smoothing_z = 0.3
self.smoothing_num = 1
self.exception_list = []
self.mask_arr = None
self.save_path = os.getcwd()
self.cov_fn_basename = 'covariance.cov'
self.cov_fn = None
self._header_str = '\n'.join(['+{0}+'.format('-' * 77),
'| This file defines model covariance for a recursive autoregression scheme. |',
'| The model space may be divided into distinct areas using integer masks. |',
'| Mask 0 is reserved for air; mask 9 is reserved for ocean. Smoothing between |',
'| air, ocean and the rest of the model is turned off automatically. You can |',
'| also define exceptions to override smoothing between any two model areas. |',
'| To turn off smoothing set it to zero. This header is 16 lines long. |',
'| 1. Grid dimensions excluding air layers (Nx, Ny, NzEarth) |',
'| 2. Smoothing in the X direction (NzEarth real values) |',
'| 3. Smoothing in the Y direction (NzEarth real values) |',
'| 4. Vertical smoothing (1 real value) |',
'| 5. Number of times the smoothing should be applied (1 integer >= 0) |',
'| 6. Number of exceptions (1 integer >= 0) |',
                                      '| 7. Exceptions in the form e.g. 2 3 0 (to turn off smoothing between 3 & 4) |',
'| 8. Two integer layer indices and Nx x Ny block of masks, repeated as needed.|',
'+{0}+'.format('-' * 77)])
for key in list(kwargs.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
else:
                self._logger.warning("Argument {}={} is not supported and has not been set.".format(key, kwargs[key]))
def write_covariance_file(self, cov_fn=None, save_path=None,
cov_fn_basename=None, model_fn=None,
                              sea_water=0.3, air=1e12):
"""
write a covariance file
"""
if model_fn is not None:
mod_obj = Model()
mod_obj.read_model_file(model_fn)
# update save_path from model path if not provided separately
if save_path is None:
save_path = os.path.dirname(model_fn)
print('Reading {0}'.format(model_fn))
self.grid_dimensions = mod_obj.res_model.shape
if self.mask_arr is None:
self.mask_arr = np.ones_like(mod_obj.res_model)
self.mask_arr[np.where(mod_obj.res_model >= air * .9)] = 0
self.mask_arr[np.where((mod_obj.res_model <= sea_water * 1.1) &
(mod_obj.res_model >= sea_water * .9))] = 9
if self.grid_dimensions is None:
raise CovarianceError('Grid dimensions are None, input as (Nx, Ny, Nz)')
if cov_fn is not None:
self.cov_fn = cov_fn
else:
if save_path is not None:
self.save_path = save_path
if cov_fn_basename is not None:
self.cov_fn_basename = cov_fn_basename
self.cov_fn = os.path.join(self.save_path, self.cov_fn_basename)
clines = [self._header_str, '\n\n', ' {0:<10}{1:<10}{2:<10}\n'.format(self.grid_dimensions[0],
self.grid_dimensions[1],
self.grid_dimensions[2]), '\n']
# --> grid dimensions
# --> smoothing in north direction
n_smooth_line = ''
for zz in range(self.grid_dimensions[2]):
if not np.iterable(self.smoothing_north):
n_smooth_line += ' {0:<5.2f}'.format(self.smoothing_north)
else:
n_smooth_line += ' {0:<5.2f}'.format(self.smoothing_north[zz])
clines.append(n_smooth_line + '\n')
# --> smoothing in east direction
e_smooth_line = ''
for zz in range(self.grid_dimensions[2]):
if not np.iterable(self.smoothing_east):
e_smooth_line += ' {0:<5.2f}'.format(self.smoothing_east)
else:
e_smooth_line += ' {0:<5.2f}'.format(self.smoothing_east[zz])
clines.append(e_smooth_line + '\n')
# --> smoothing in vertical direction
clines.append(' {0:<5.2f}\n'.format(self.smoothing_z))
clines.append('\n')
# --> number of times to apply smoothing
clines.append(' {0:<2.0f}\n'.format(self.smoothing_num))
clines.append('\n')
# --> exceptions
clines.append(' {0:<.0f}\n'.format(len(self.exception_list)))
for exc in self.exception_list:
clines.append('{0:<5.0f}{1:<5.0f}{2:<5.0f}\n'.format(exc[0],
exc[1],
exc[2]))
clines.append('\n')
clines.append('\n')
# --> mask array
if self.mask_arr is None:
self.mask_arr = np.ones((self.grid_dimensions[0],
self.grid_dimensions[1],
self.grid_dimensions[2]))
# need to flip north and south.
write_mask_arr = self.mask_arr[::-1, :, :].copy()
for zz in range(self.mask_arr.shape[2]):
clines.append(' {0:<8.0f}{0:<8.0f}\n'.format(zz + 1))
for nn in range(self.mask_arr.shape[0]):
cline = ''
for ee in range(self.mask_arr.shape[1]):
cline += '{0:^3.0f}'.format(write_mask_arr[nn, ee, zz])
clines.append(cline + '\n')
with open(self.cov_fn, 'w') as cfid:
cfid.writelines(clines)
        # no explicit cfid.close() needed; the with-block closes the file
self._logger.info('Wrote covariance file to {0}'.format(self.cov_fn))
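    # A minimal usage sketch (hypothetical file name; not part of this module):
    #
    #     cov = Covariance()
    #     cov.write_covariance_file(model_fn='ModEM_Model.rho',
    #                               cov_fn_basename='covariance.cov')
    #
    # Passing model_fn derives the mask from the resistivity model, flagging
    # air cells (mask 0) and sea water cells (mask 9) automatically.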
def read_cov_file(self, cov_fn):
"""
read a covariance file
"""
if not os.path.isfile(cov_fn):
raise CovarianceError('{0} not found, check path'.format(cov_fn))
self.cov_fn = cov_fn
self.save_path = os.path.dirname(self.cov_fn)
self.cov_fn_basename = os.path.basename(self.cov_fn)
with open(cov_fn, 'r') as fid:
lines = fid.readlines()
num_find = False
east_find = False
north_find = False
count = 0
for line in lines:
if line.find('+') >= 0 or line.find('|') >= 0:
continue
else:
line_list = line.strip().split()
if len(line_list) == 0:
continue
elif len(line_list) == 1 and not num_find and line_list[0].find('.') == -1:
self.smoothing_num = int(line_list[0])
num_find = True
elif len(line_list) == 1 and num_find and line_list[0].find('.') == -1:
self.exceptions_num = int(line_list[0])
elif len(line_list) == 1 and line_list[0].find('.') >= 0:
self.smoothing_z = float(line_list[0])
elif len(line_list) == 3:
nx, ny, nz = [int(ii) for ii in line_list]
self.grid_dimensions = (nx, ny, nz)
                    self.mask_arr = np.ones((nx, ny, nz), dtype=int)  # np.int alias removed in modern numpy
self.smoothing_east = np.zeros(ny)
self.smoothing_north = np.zeros(nx)
elif len(line_list) == 2:
# starts at 1 but python starts at 0
index_00, index_01 = [int(ii) - 1 for ii in line_list]
count = 0
                elif line_list[0].find('.') >= 0 and not north_find:
                    self.smoothing_north = np.array(line_list, dtype=float)
                    north_find = True
                elif line_list[0].find('.') >= 0 and north_find:
                    self.smoothing_east = np.array(line_list, dtype=float)
                    east_find = True
                elif north_find and east_find:
                    line_list = np.array(line_list, dtype=int)
                    line_list = line_list.reshape((ny, 1))
                    self.mask_arr[count, :, index_00:index_01 + 1] = line_list
count += 1
def get_parameters(self):
parameter_list = ['smoothing_north',
'smoothing_east',
'smoothing_z',
'smoothing_num']
parameter_dict = {}
for parameter in parameter_list:
key = 'covariance.{0}'.format(parameter)
parameter_dict[key] = getattr(self, parameter)
return parameter_dict
def write_cov_vtk_file(self, cov_vtk_fn, model_fn=None, grid_east=None,
grid_north=None, grid_z=None):
"""
write a vtk file of the covariance to match things up
"""
if model_fn is not None:
m_obj = Model()
m_obj.read_model_file(model_fn)
grid_east = m_obj.grid_east
grid_north = m_obj.grid_north
grid_z = m_obj.grid_z
        # grid_east, grid_north and grid_z are used as passed in when no
        # model_fn is given; the mask is cell-based, so gridToVTK gets node
        # coordinates (n+1 per axis) and the mask as cellData
gridToVTK(cov_vtk_fn,
grid_north / 1000.,
grid_east / 1000.,
grid_z / 1000.,
cellData={'covariance_mask': self.mask_arr})
self._logger.info('Wrote covariance file to {0}\n'.format(cov_vtk_fn))
|
MTgeophysics/mtpy
|
mtpy/modeling/modem/convariance.py
|
Python
|
gpl-3.0
| 11,209
|
[
"VTK"
] |
2e49983590d637a06522cb16c5b2929776d825fee014ba3e42c9cf074b6934ae
|
#!/usr/bin/env python3
# Copyright 2019 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DISCLAIMER: This is a work in progress. This linter was written specifically
# for the P4Runtime specification document and may not be useful for other
# Madoko documents, as it may be making some assumptions as to how the document
# was written.
# TODO: handle Madoko includes (we do not use them for the P4Runtime spec)?
import argparse
from collections import namedtuple
import json
import os.path
import re
import sys
import traceback
DEFAULT_CONF = 'madokolint.conf.json'
LINE_WRAP_LENGTH = 80
parser = argparse.ArgumentParser(description='Lint tool for Madoko code')
parser.add_argument('files', metavar='FILE', type=str, nargs='+',
help='Input files')
parser.add_argument('--conf', type=str,
help='Configuration file for lint tool')
class MadokoFmtError(Exception):
def __init__(self, filename, lineno, description):
self.filename = filename
self.lineno = lineno
self.description = description
def __str__(self):
return "Unexpected Madoko code in file {} at line {}: {}".format(
self.filename, self.lineno, self.description)
class LintState:
def __init__(self):
self.errors_cnt = 0
def error(self, filename, lineno, line, description):
# TODO: print line later?
print("Error in file {} at line {}: {}.".format(filename, lineno, description))
self.errors_cnt += 1
lint_state = LintState()
class LintConf:
class BadConfException(Exception):
def __init__(self, what):
self.what = what
def __str__(self):
return self.what
def __init__(self):
self.keywords = {}
def build_from(self, conf_fp):
try:
conf_d = json.load(conf_fp)
for entry in conf_d['keywords']:
category = entry['category']
for keyword in entry['keywords']:
if keyword in self.keywords:
raise LintConf.BadConfException(
"Keyword '{}' is present multiple times in configuration".format(
keyword))
self.keywords[keyword] = category
except json.JSONDecodeError:
print("Provided configuration file is not a valid JSON file")
sys.exit(1)
except KeyError:
print("Provided JSON configuration file has missing attributes")
traceback.print_exc()
sys.exit(1)
except LintConf.BadConfException as e:
print(str(e))
sys.exit(1)
lint_conf = LintConf()
class Context:
"""A context is an object that is used to determine whether a specific "checker" (check_*
method) should visit a given line."""
def enter(self, line, filename, lineno):
"""Called before visiting a line.
Returns True iff the checker should visit the given line.
"""
return True
def exit(self, line, filename, lineno):
"""Called after visiting a line."""
pass
class ContextSkipBlocks(Context):
"""A context used to only visit Madoko code outside of blocks."""
Block = namedtuple('Block', ['num_tildes', 'name'])
def __init__(self):
        self.p_block = re.compile(r'^ *(?P<tildes>~+) *(?:(?P<cmd>Begin|End)(?: +))?(?P<name>\w+)?')
self.blocks_stack = []
def enter(self, line, filename, lineno):
m = self.p_block.match(line)
if m:
num_tildes = len(m.group("tildes"))
has_begin = m.group("cmd") == "Begin"
has_end = m.group("cmd") == "End"
blockname = m.group("name")
if has_begin:
self.blocks_stack.append(self.Block(num_tildes, blockname))
return False
if has_end:
if not self.blocks_stack:
raise MadokoFmtError(filename, lineno, "Block end line but no block was begun")
expected = self.blocks_stack.pop()
if num_tildes != expected.num_tildes or blockname != expected.name:
raise MadokoFmtError(
filename, lineno,
"Block end line does not match last visited block begin line")
return False
if blockname is None:
if not self.blocks_stack:
raise MadokoFmtError(filename, lineno, "Block end line but no block was begun")
expected = self.blocks_stack.pop()
if num_tildes != expected.num_tildes:
raise MadokoFmtError(
filename, lineno,
"Block end line does not match last visited block begin line")
return False
self.blocks_stack.append(self.Block(num_tildes, blockname))
return False
if self.blocks_stack:
return False
return True
# TODO: would "skip metadata" be more generic?
class ContextAfterTitle(Context):
"""A context used to visit only Madoko code after the [TITLE] block element.
"""
def __init__(self, *args):
self.title_found = False
        self.p_title = re.compile(r'^ *\[TITLE\] *$')
def enter(self, line, filename, lineno):
if self.title_found:
return True
self.title_found = self.p_title.match(line) is not None
return False
class ContextSkipHeadings(Context):
"""A context used to skip headings (lines starting with #)."""
def __init__(self, *args):
self.p_headings = re.compile('^ *#')
def enter(self, line, filename, lineno):
return self.p_headings.match(line) is None
class ContextCompose(Context):
"""A special context used to combine an arbitrary number of contexts."""
def __init__(self, *args):
self.contexts = list(args)
def enter(self, line, filename, lineno):
res = True
for c in self.contexts:
# we use a short-circuit on purpose, if a context returns False we do not even enter
# subsequent contexts. This has some implications on how contexts are used.
res = res and c.enter(line, filename, lineno)
return res
def exit(self, line, filename, lineno):
for c in self.contexts:
c.exit(line, filename, lineno)
def foreach_line(path, context, fn):
"""Iterate over every line in the file. For each line, call fn iff the enter method of the
provided context returns True."""
lineno = 1
with open(path, 'r') as f:
for line in f:
if context.enter(line, path, lineno):
fn(line, lineno)
lineno += 1
context.exit(line, path, lineno)
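# New checkers follow the same pattern: define a per-line function and hand it
# to foreach_line together with the contexts it should respect. A hypothetical
# sketch that flags tab characters outside of blocks:
#
#     def check_tabs(path):
#         def check(line, lineno):
#             if '\t' in line:
#                 lint_state.error(path, lineno, line, "contains a tab character")
#         foreach_line(path, ContextSkipBlocks(), check)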
def check_line_wraps(path):
def check(line, lineno):
if "http" in line: # TODO: we can probably do better than this
return
        if len(line) > LINE_WRAP_LENGTH + 1:  # +1 for the newline character
lint_state.error(path, lineno, line,
"is more than {} characters long".format(LINE_WRAP_LENGTH))
foreach_line(path,
ContextCompose(ContextAfterTitle(), ContextSkipBlocks(), ContextSkipHeadings()),
check)
def check_trailing_whitespace(path):
def check(line, lineno):
if len(line) >= 2 and line[-2].isspace():
lint_state.error(path, lineno, line, "trailing whitespace")
foreach_line(path, Context(), check)
def check_predefined_abbreviations(path):
abbreviations = {
        'e.g.': '&eg;',
'i.e.': '&ie;',
'et al.': '&etal;',
}
def check(line, lineno):
for k, v in abbreviations.items():
if k in line:
lint_state.error(path, lineno, line,
"contains '{}', use '{}' instead".format(k, v))
foreach_line(path, ContextCompose(ContextAfterTitle(), ContextSkipBlocks()), check)
def check_keywords(path):
def check(line, lineno):
for word in line.split():
if word not in lint_conf.keywords:
continue
category = lint_conf.keywords[word]
lint_state.error(
path, lineno, line,
"'{}' is a known keyword ({}), highlight it with backticks".format(word, category))
foreach_line(path, ContextCompose(ContextAfterTitle(), ContextSkipBlocks()), check)
def process_one(path):
check_line_wraps(path)
check_predefined_abbreviations(path)
check_trailing_whitespace(path)
check_keywords(path)
def main():
args = parser.parse_args()
for f in args.files:
if not os.path.isfile(f):
print("'{}' is not a valid file path".format(f))
sys.exit(1)
_, ext = os.path.splitext(f)
if ext != ".mdk":
print("'{}' does not have an .mdk extension")
sys.exit(1)
conf_path = None
if args.conf is not None:
if not os.path.isfile(args.conf):
print("'{}' is not a valid file path".format(args.conf))
sys.exit(1)
conf_path = args.conf
elif os.path.isfile(DEFAULT_CONF): # search working directory
conf_path = DEFAULT_CONF
else: # search directory of Python script
this_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(this_dir, DEFAULT_CONF)
if os.path.isfile(path):
conf_path = path
if conf_path is not None:
with open(conf_path, 'r') as conf_fp:
lint_conf.build_from(conf_fp)
for f in args.files:
try:
process_one(f)
except MadokoFmtError as e:
print(e)
errors_cnt = lint_state.errors_cnt
print("**********")
print("Errors found: {}".format(errors_cnt))
rc = 0 if errors_cnt == 0 else 2
sys.exit(rc)
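# Invocation sketch (file name is illustrative):
#
#     python3 madokolint.py --conf madokolint.conf.json spec.mdk
#
# The process exits with status 2 when lint errors are found, 0 otherwise.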
if __name__ == '__main__':
main()
|
p4lang/p4runtime
|
tools/madokolint.py
|
Python
|
apache-2.0
| 10,508
|
[
"VisIt"
] |
e7ba314d21bacc43e5ea4b404d24b3983afbe2618188cfaf59f90bb3bf4e8ed8
|
#
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import espressomd
import numpy as np
import espressomd.observables
def cos_persistence_angles(positions):
""" Python implementation for PersistenceAngles observable.
"""
no_of_bonds = positions.shape[0] - 1
no_of_angles = no_of_bonds - 1
bond_vecs = positions[1:] - positions[:-1]
bond_vecs = np.divide(bond_vecs, np.linalg.norm(
bond_vecs, axis=1)[:, np.newaxis])
angles = np.zeros(no_of_angles)
for i in range(no_of_angles):
average = 0.0
for j in range(no_of_angles - i):
average += np.dot(bond_vecs[j], bond_vecs[j + i + 1])
angles[i] = average / (no_of_angles - i)
return angles
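# For a chain of N particles there are N - 1 bond vectors; angles[i] is the
# average of dot(bond_vecs[j], bond_vecs[j + i + 1]) over all admissible j,
# i.e. the mean cosine between bond directions separated by i + 1 bonds.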
class ObservableTests(ut.TestCase):
n_tries = 50
n_parts = 5
box_l = 5.
system = espressomd.System(box_l=3 * [box_l])
system.periodicity = [1, 1, 1]
system.time_step = 0.01
system.cell_system.skin = 0.2 * box_l
def setUp(self):
for i in range(self.n_parts):
self.system.part.add(pos=[1 + i, 1 + i, 1 + i], id=i)
self.partcls = self.system.part.all()
def tearDown(self):
self.system.part.clear()
def test_ParticleDistances(self):
"""
Check ParticleDistances, for a particle pair and for a chain.
"""
pids = list(range(self.n_parts))
obs_single = espressomd.observables.ParticleDistances(ids=[0, 1])
obs_chain = espressomd.observables.ParticleDistances(ids=pids)
# take periodic boundaries into account: bond length cannot exceed
# half the box size along the smallest axis
min_dim = np.min(self.system.box_l)
max_bond_length = min_dim / 2.01
for _ in range(self.n_tries):
# build polymer
pos = np.zeros((self.n_parts, 3), dtype=float)
pos[0] = np.random.uniform(low=0, high=min_dim, size=3)
for i in range(1, self.n_parts):
pos[i] = pos[i - 1] + np.random.uniform(
low=0, high=max_bond_length, size=3)
self.partcls.pos = pos
# expected values
distances = np.linalg.norm(pos[1:] - pos[:-1], axis=1)
# observed values
self.system.integrator.run(0)
res_obs_single = obs_single.calculate()
res_obs_chain = obs_chain.calculate()
# checks
self.assertEqual(np.prod(res_obs_single.shape), 1)
self.assertEqual(np.prod(res_obs_chain.shape), self.n_parts - 1)
self.assertAlmostEqual(res_obs_single[0], distances[0], places=9)
np.testing.assert_array_almost_equal(
res_obs_chain, distances, decimal=9,
err_msg="Data did not agree for observable ParticleDistances")
# check exceptions
for i in range(2):
with self.assertRaises(RuntimeError):
espressomd.observables.ParticleDistances(ids=np.arange(i))
def test_BondAngles(self):
"""
Check BondAngles, for a particle triple and for a chain.
"""
pids = list(range(self.n_parts))
obs_single = espressomd.observables.BondAngles(ids=[0, 1, 2])
obs_chain = espressomd.observables.BondAngles(ids=pids)
# take periodic boundaries into account: bond length cannot exceed
# half the box size along the smallest axis
min_dim = np.min(self.system.box_l)
max_bond_length = min_dim / 2.01
for _ in range(self.n_tries):
# build polymer
pos = np.zeros((self.n_parts, 3), dtype=float)
pos[0] = np.random.uniform(low=0, high=min_dim, size=3)
for i in range(1, self.n_parts):
pos[i] = pos[i - 1] + np.random.uniform(
low=0, high=max_bond_length, size=3)
self.partcls.pos = pos
# expected values
v1 = pos[:-2] - pos[1:-1]
v2 = pos[2:] - pos[1:-1]
l1 = np.linalg.norm(v1, axis=1)
l2 = np.linalg.norm(v2, axis=1)
angles = np.arccos((v1 * v2).sum(1) / l1 / l2)
# observed values
self.system.integrator.run(0)
res_obs_single = obs_single.calculate()
res_obs_chain = obs_chain.calculate()
# checks
self.assertEqual(np.prod(res_obs_single.shape), 1)
self.assertEqual(np.prod(res_obs_chain.shape), self.n_parts - 2)
self.assertAlmostEqual(res_obs_single[0], angles[0], places=9)
np.testing.assert_array_almost_equal(
res_obs_chain, angles, decimal=9,
err_msg="Data did not agree for observable BondAngles")
# check exceptions
for i in range(3):
with self.assertRaises(RuntimeError):
espressomd.observables.BondAngles(ids=np.arange(i))
def test_BondDihedrals(self):
"""
Check BondDihedrals, for a particle quadruple and for a chain.
"""
def rotate_vector(v, k, phi):
"""Rotates vector v around unit vector k by angle phi.
Uses Rodrigues' rotation formula."""
vrot = v * np.cos(phi) + np.cross(k, v) * \
np.sin(phi) + k * np.dot(k, v) * (1.0 - np.cos(phi))
return vrot
def rotate_particle(p2, p3, p4, phi):
"""Rotates particle p4 around the axis formed by the bond
between p2 and p3."""
k = p3 - p2
k /= np.linalg.norm(k)
return p3 + rotate_vector(p4 - p3, k, phi)
def calculate_dihedral(a, b, c, d):
v1 = b - a
v2 = c - b
v3 = d - c
b1 = np.cross(v1, v2)
b2 = np.cross(v2, v3)
u2 = v2 / np.linalg.norm(v2)
return np.arctan2(np.dot(np.cross(b1, b2), u2), np.dot(b1, b2))
def place_particles(bl, offset):
"""Place 5 particles in the XY plane with bond length `bl` and
bond angle = 120 degrees. The chain is then shifted by `offset`."""
phi = 2 * np.pi / 3
pos = np.zeros((self.n_parts, 3), dtype=float)
pos[0] = [bl * np.cos(phi), bl * np.sin(phi), 0.]
pos[1] = [0., 0., 0.]
pos[2] = [bl, 0., 0.]
pos[3] = pos[2] + [bl * np.cos(np.pi - phi), bl * np.sin(phi), 0.]
pos[4] = pos[3] + [bl, 0., 0.]
pos += offset
self.partcls.pos = pos
return pos
pids = list(range(self.n_parts))
obs_single = espressomd.observables.BondDihedrals(ids=pids[:4])
obs_chain = espressomd.observables.BondDihedrals(ids=pids)
# test multiple angles, take periodic boundaries into account
p0, p4 = self.system.part.by_ids([0, 4])
for bond_length in [0.1, self.box_l / 2.0]:
for offset in [1.0, self.box_l / 2.0]:
for phi in np.arange(0, np.pi, np.pi / 6):
# place particles and keep list of unfolded positions
pos = place_particles(bond_length, 3 * [offset])
# rotate the 1st particle
p0.pos = pos[0] = rotate_particle(*pos[1:4, :][::-1],
phi=phi)
# rotate the 5th particle
p4.pos = pos[4] = rotate_particle(*pos[2:5, :], phi=phi)
# expected values
dih1 = calculate_dihedral(*pos[0:4, :][::-1])
dih2 = calculate_dihedral(*pos[1:5, :])
# observed values
self.system.integrator.run(0)
res_obs_single = obs_single.calculate()
res_obs_chain = obs_chain.calculate()
# checks
self.assertEqual(np.prod(res_obs_single.shape), 1)
self.assertEqual(
np.prod(res_obs_chain.shape),
self.n_parts - 3)
self.assertAlmostEqual(res_obs_single[0], dih1, places=9)
np.testing.assert_array_almost_equal(
res_obs_chain, [dih1, dih2], decimal=9,
err_msg="Data did not agree for observable BondDihedrals")
# check exceptions
for i in range(4):
with self.assertRaises(RuntimeError):
espressomd.observables.BondDihedrals(ids=np.arange(i))
def test_CosPersistenceAngles(self):
# First test: compare with python implementation
self.system.part.clear()
partcls = self.system.part.add(pos=np.array(
[np.linspace(0, self.system.box_l[0], 20)] * 3).T + np.random.random((20, 3)))
obs = espressomd.observables.CosPersistenceAngles(
ids=partcls.id)
np.testing.assert_allclose(
obs.calculate(), cos_persistence_angles(partcls.pos))
self.system.part.clear()
# Second test: place particles with fixed angles and check that the
# result of PersistenceAngle.calculate()[i] is i*phi
delta_phi = np.radians(4)
for i in range(10):
pos = [np.cos(i * delta_phi), np.sin(i * delta_phi), 0.0]
self.system.part.add(pos=pos)
new_partcls = self.system.part.all()
obs = espressomd.observables.CosPersistenceAngles(
ids=new_partcls.id)
expected = np.arange(1, 9) * delta_phi
np.testing.assert_allclose(obs.calculate(), np.cos(expected))
# check exceptions
for i in range(3):
with self.assertRaises(RuntimeError):
espressomd.observables.CosPersistenceAngles(ids=np.arange(i))
if __name__ == "__main__":
ut.main()
| pkreissl/espresso | testsuite/python/observable_chain.py | Python | gpl-3.0 | 10,445 | ["ESPResSo"] | 00947dcb611f2d17df51545d98d7c688fc4758b45bb5b3716762321c24815a9a |
""" Test class for SiteDirector
"""
# pylint: disable=protected-access
# imports
import datetime
import pytest
from mock import MagicMock
from DIRAC import gLogger
# sut
from DIRAC.WorkloadManagementSystem.Agent.SiteDirector import SiteDirector
mockAM = MagicMock()
mockGCReply = MagicMock()
mockGCReply.return_value = 'TestSetup'
mockOPSObject = MagicMock()
mockOPSObject.getValue.return_value = '123'
mockOPSReply = MagicMock()
mockOPSReply.return_value = '123'
mockOPS = MagicMock()
mockOPS.return_value = mockOPSObject
# mockOPS.Operations = mockOPSObject
mockPM = MagicMock()
mockPM.requestToken.return_value = {'OK': True, 'Value': ('token', 1)}
mockPMReply = MagicMock()
mockPMReply.return_value = {'OK': True, 'Value': ('token', 1)}
mockCSGlobalReply = MagicMock()
mockCSGlobalReply.return_value = 'TestSetup'
mockResourcesReply = MagicMock()
mockResourcesReply.return_value = {'OK': True, 'Value': ['x86_64-slc6', 'x86_64-slc5']}
mockPilotAgentsDB = MagicMock()
mockPilotAgentsDB.setPilotStatus.return_value = {'OK': True}
gLogger.setLevel('DEBUG')
def test__getPilotOptions(mocker):
""" Testing SiteDirector()._getPilotOptions()
"""
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule.__init__")
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.gConfig.getValue", side_effect=mockGCReply)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.Operations", side_effect=mockOPS)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.gProxyManager.requestToken", side_effect=mockPMReply)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule", side_effect=mockAM)
sd = SiteDirector()
sd.log = gLogger
sd.am_getOption = mockAM
sd.log.setLevel('DEBUG')
sd.queueDict = {'aQueue': {'CEName': 'aCE',
'QueueName': 'aQueue',
'ParametersDict': {'CPUTime': 12345,
'Community': 'lhcb',
'OwnerGroup': ['lhcb_user'],
'Setup': 'LHCb-Production',
'Site': 'LCG.CERN.cern',
'SubmitPool': ''}}}
res = sd._getPilotOptions('aQueue', 10)
assert res[0] == ['-S TestSetup', '-V 123', '-l 123', '-r 1,2,3', '-g 123',
'-o /Security/ProxyToken=token', '-M 1', '-C T,e,s,t,S,e,t,u,p',
'-e 1,2,3', '-N aCE', '-Q aQueue', '-n LCG.CERN.cern']
assert res[1] == 1
@pytest.mark.parametrize("mockMatcherReturnValue, expected, anyExpected, sitesExpected", [
({'OK': False, 'Message': 'boh'},
False, True, set()),
({'OK': True, 'Value': None},
False, True, set()),
({'OK': True, 'Value': {'1': {'Jobs': 10}, '2': {'Jobs': 20}}},
True, True, set()),
({'OK': True, 'Value': {'1': {'Jobs': 10, 'Sites': ['Site1']},
'2': {'Jobs': 20}}},
True, True, set(['Site1'])),
({'OK': True, 'Value': {'1': {'Jobs': 10, 'Sites': ['Site1', 'Site2']},
'2': {'Jobs': 20}}},
True, True, set(['Site1', 'Site2'])),
({'OK': True, 'Value': {'1': {'Jobs': 10, 'Sites': ['Site1', 'Site2']},
'2': {'Jobs': 20, 'Sites': ['Site1']}}},
True, False, set(['Site1', 'Site2'])),
({'OK': True, 'Value': {'1': {'Jobs': 10, 'Sites': ['Site1', 'Site2']},
'2': {'Jobs': 20, 'Sites': ['ANY']}}},
True, False, {'Site1', 'Site2', 'ANY'}),
({'OK': True, 'Value': {'1': {'Jobs': 10, 'Sites': ['Site1', 'Site2']},
'2': {'Jobs': 20, 'Sites': ['ANY', 'Site3']}}},
True, False, {'Site1', 'Site2', 'Site3', 'ANY'}),
({'OK': True, 'Value': {'1': {'Jobs': 10, 'Sites': ['Site1', 'Site2']},
'2': {'Jobs': 20, 'Sites': ['Any', 'Site3']}}},
True, False, {'Site1', 'Site2', 'Site3', 'Any'}),
({'OK': True, 'Value': {'1': {'Jobs': 10, 'Sites': ['Site1', 'Site2']},
'2': {'Jobs': 20, 'Sites': ['NotAny', 'Site2']}}},
True, False, {'Site1', 'Site2', 'NotAny'}),
])
def test__ifAndWhereToSubmit(mocker, mockMatcherReturnValue, expected, anyExpected, sitesExpected):
""" Testing SiteDirector()._ifAndWhereToSubmit()
"""
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule.__init__")
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.gConfig.getValue", side_effect=mockGCReply)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.CSGlobals.getSetup", side_effect=mockCSGlobalReply)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule", side_effect=mockAM)
sd = SiteDirector()
sd.log = gLogger
sd.am_getOption = mockAM
sd.log.setLevel('DEBUG')
sd.matcherClient = MagicMock()
sd.matcherClient.getMatchingTaskQueues.return_value = mockMatcherReturnValue
res = sd._ifAndWhereToSubmit()
assert res[0] == expected
if res[0]:
assert res == (expected, anyExpected, sitesExpected, set())
def test__allowedToSubmit(mocker):
""" Testing SiteDirector()._allowedToSubmit()
"""
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule.__init__")
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule", side_effect=mockAM)
sd = SiteDirector()
sd.log = gLogger
sd.am_getOption = mockAM
sd.log.setLevel('DEBUG')
sd.queueDict = {'aQueue': {'Site': 'LCG.CERN.cern',
'CEName': 'aCE',
'QueueName': 'aQueue',
'ParametersDict': {'CPUTime': 12345,
'Community': 'lhcb',
'OwnerGroup': ['lhcb_user'],
'Setup': 'LHCb-Production',
'Site': 'LCG.CERN.cern',
'SubmitPool': ''}}}
submit = sd._allowedToSubmit('aQueue', True, set(['LCG.CERN.cern']), set())
assert submit is False
sd.siteMaskList = ['LCG.CERN.cern', 'DIRAC.CNAF.it']
submit = sd._allowedToSubmit('aQueue', True, set(['LCG.CERN.cern']), set())
assert submit is True
sd.rssFlag = True
submit = sd._allowedToSubmit('aQueue', True, set(['LCG.CERN.cern']), set())
assert submit is False
sd.ceMaskList = ['aCE', 'anotherCE']
submit = sd._allowedToSubmit('aQueue', True, set(['LCG.CERN.cern']), set())
assert submit is True
def test__submitPilotsToQueue(mocker):
""" Testing SiteDirector()._submitPilotsToQueue()
"""
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule.__init__")
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.gConfig.getValue", side_effect=mockGCReply)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.CSGlobals.getSetup", side_effect=mockCSGlobalReply)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule", side_effect=mockAM)
sd = SiteDirector()
sd.log = gLogger
sd.am_getOption = mockAM
sd.log.setLevel('DEBUG')
sd.rpcMatcher = MagicMock()
sd.rssClient = MagicMock()
sd.workingDirectory = ''
sd.queueDict = {'aQueue': {'Site': 'LCG.CERN.cern',
'CEName': 'aCE',
'CEType': 'SSH',
'QueueName': 'aQueue',
'ParametersDict': {'CPUTime': 12345,
'Community': 'lhcb',
'OwnerGroup': ['lhcb_user'],
'Setup': 'LHCb-Production',
'Site': 'LCG.CERN.cern',
'SubmitPool': ''}}}
sd.queueSlots = {'aQueue': {'AvailableSlots': 10}}
res = sd._submitPilotsToQueue(1, MagicMock(), 'aQueue')
assert res['OK'] is True
assert res['Value'][0] == 0
@pytest.mark.parametrize("pilotRefs, pilotDict, pilotCEDict, expected", [
([], {}, {}, (0, [])),
(['aPilotRef'],
{'aPilotRef': {'Status': 'Running', 'LastUpdateTime': datetime.datetime.utcnow()}},
{},
(0, [])),
(['aPilotRef'],
{'aPilotRef': {'Status': 'Running', 'LastUpdateTime': datetime.datetime.utcnow()}},
{'aPilotRef': 'Running'},
(0, [])),
(['aPilotRef'],
{'aPilotRef': {'Status': 'Running', 'LastUpdateTime': datetime.datetime.utcnow()}},
{'aPilotRef': 'Unknown'},
(0, []))
])
def test__updatePilotStatus(mocker, pilotRefs, pilotDict, pilotCEDict, expected):
""" Testing SiteDirector()._updatePilotStatus()
"""
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule.__init__")
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.gConfig.getValue", side_effect=mockGCReply)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.CSGlobals.getSetup", side_effect=mockCSGlobalReply)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule", side_effect=mockAM)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.pilotAgentsDB", side_effect=mockPilotAgentsDB)
sd = SiteDirector()
sd.log = gLogger
sd.am_getOption = mockAM
sd.log.setLevel('DEBUG')
sd.rpcMatcher = MagicMock()
sd.rssClient = MagicMock()
res = sd._updatePilotStatus(pilotRefs, pilotDict, pilotCEDict)
assert res == expected
| petricm/DIRAC | WorkloadManagementSystem/Agent/test/Test_Agent_SiteDirector.py | Python | gpl-3.0 | 9,644 | ["DIRAC"] | ae26f29d270887828b27fa29230c8dab7fdea6d1448baa532011c32efe72cafd |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""Hydrogen Bond Analysis --- :mod:`MDAnalysis.analysis.hydrogenbonds.hbond_analysis`
=====================================================================================
:Author: Paul Smith
:Year: 2019
:Copyright: GNU Public License v3
.. versionadded:: 1.0.0
This module provides methods to find and analyse hydrogen bonds in a Universe.
The :class:`HydrogenBondAnalysis` class is a new version of the original
:class:`MDAnalysis.analysis.hbonds.HydrogenBondAnalysis` class from the module
:mod:`MDAnalysis.analysis.hbonds.hbond_analysis`, which itself was modeled after the `VMD
HBONDS plugin`_.
.. _`VMD HBONDS plugin`: http://www.ks.uiuc.edu/Research/vmd/plugins/hbonds/
Input
------
Required:
- *universe* : an MDAnalysis Universe object
Options:
- *donors_sel* [None] : Atom selection for donors. If `None`, donors will be identified via the topology.
- *hydrogens_sel* [None] : Atom selection for hydrogens. If `None`, hydrogens will be identified via charge and mass.
- *acceptors_sel* [None] : Atom selection for acceptors. If `None`, acceptors will be identified via charge.
- *d_h_cutoff* (Å) [1.2] : Distance cutoff used for finding donor-hydrogen pairs
- *d_a_cutoff* (Å) [3.0] : Distance cutoff for hydrogen bonds. This cutoff refers to the D-A distance.
- *d_h_a_angle_cutoff* (degrees) [150] : D-H-A angle cutoff for hydrogen bonds.
- *update_selections* [True] : If true, will update atom selections at each frame.
Output
------
- *frame* : frame at which a hydrogen bond was found
- *donor id* : atom id of the hydrogen bond donor atom
- *hydrogen id* : atom id of the hydrogen bond hydrogen atom
- *acceptor id* : atom id of the hydrogen bond acceptor atom
- *distance* (Å): length of the hydrogen bond
- *angle* (degrees): angle of the hydrogen bond
Hydrogen bond data are returned in a :class:`numpy.ndarray` on a "one line, one observation" basis
and can be accessed via :attr:`HydrogenBondAnalysis.results.hbonds`::
results = [
[
<frame>,
<donor index (0-based)>,
<hydrogen index (0-based)>,
<acceptor index (0-based)>,
<distance>,
<angle>
],
...
]
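For example, individual columns may be unpacked from the array after running
the analysis (a hedged sketch; the column order is as listed above)::
    frames = hbonds.results.hbonds[:, 0].astype(int)
    distances = hbonds.results.hbonds[:, 4]
    angles = hbonds.results.hbonds[:, 5]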
Example use of :class:`HydrogenBondAnalysis`
--------------------------------------------
The simplest use case is to allow :class:`HydrogenBondAnalysis` to guess the acceptor and hydrogen atoms, and to
identify donor-hydrogen pairs via the bonding information in the topology::
import MDAnalysis
from MDAnalysis.analysis.hydrogenbonds.hbond_analysis import HydrogenBondAnalysis as HBA
u = MDAnalysis.Universe(psf, trajectory)
hbonds = HBA(universe=u)
hbonds.run()
It is also possible to specify which hydrogens and acceptors to use in the analysis. For example, to find all hydrogen
bonds in water::
import MDAnalysis
from MDAnalysis.analysis.hydrogenbonds.hbond_analysis import HydrogenBondAnalysis as HBA
u = MDAnalysis.Universe(psf, trajectory)
hbonds = HBA(universe=u, hydrogens_sel='resname TIP3 and name H1 H2', acceptors_sel='resname TIP3 and name OH2')
hbonds.run()
Alternatively, :attr:`hydrogens_sel` and :attr:`acceptors_sel` may be generated via the :attr:`guess_hydrogens` and
:attr:`guess_acceptors` functions. These selection strings may then be modified prior to calling :attr:`run`, or a subset of
the universe may be used to guess the atoms. For example, to find hydrogens and acceptors belonging to a protein::
import MDAnalysis
from MDAnalysis.analysis.hydrogenbonds.hbond_analysis import HydrogenBondAnalysis as HBA
u = MDAnalysis.Universe(psf, trajectory)
hbonds = HBA(universe=u)
hbonds.hydrogens_sel = hbonds.guess_hydrogens("protein")
hbonds.acceptors_sel = hbonds.guess_acceptors("protein")
hbonds.run()
Slightly more complex selection strings are also possible. For example, to find hydrogen bonds involving a protein and
any water molecules within 10 Å of the protein (which may be useful for subsequently finding the lifetime of
protein-water hydrogen bonds or finding water-bridging hydrogen bond paths)::
import MDAnalysis
from MDAnalysis.analysis.hydrogenbonds.hbond_analysis import HydrogenBondAnalysis as HBA
u = MDAnalysis.Universe(psf, trajectory)
hbonds = HBA(universe=u)
protein_hydrogens_sel = hbonds.guess_hydrogens("protein")
protein_acceptors_sel = hbonds.guess_acceptors("protein")
water_hydrogens_sel = "resname TIP3 and name H1 H2"
water_acceptors_sel = "resname TIP3 and name OH2"
hbonds.hydrogens_sel = f"({protein_hydrogens_sel}) or ({water_hydrogens_sel} and around 10 not resname TIP3)"
hbonds.acceptors_sel = f"({protein_acceptors_sel}) or ({water_acceptors_sel} and around 10 not resname TIP3)"
hbonds.run()
To calculate the hydrogen bonds between different groups, for example a
protein and water, one can use the :attr:`between` keyword. The
following will find protein-water hydrogen bonds but not protein-protein
or water-water hydrogen bonds::
import MDAnalysis
from MDAnalysis.analysis.hydrogenbonds.hbond_analysis import (
HydrogenBondAnalysis as HBA)
u = MDAnalysis.Universe(psf, trajectory)
hbonds = HBA(
universe=u,
between=['resname TIP3', 'protein']
)
protein_hydrogens_sel = hbonds.guess_hydrogens("protein")
protein_acceptors_sel = hbonds.guess_acceptors("protein")
water_hydrogens_sel = "resname TIP3 and name H1 H2"
water_acceptors_sel = "resname TIP3 and name OH2"
hbonds.hydrogens_sel = f"({protein_hydrogens_sel}) or ({water_hydrogens_sel})"
hbonds.acceptors_sel = f"({protein_acceptors_sel}) or ({water_acceptors_sel})"
hbonds.run()
It is further possible to compute hydrogen bonds between several groups with
the use of :attr:`between`. If in the above example,
`between=[['resname TIP3', 'protein'], ['protein', 'protein']]`, all
protein-water and protein-protein hydrogen bonds will be found, but
no water-water hydrogen bonds.
In order to compute the hydrogen bond lifetime, after finding hydrogen bonds
one can use the :attr:`lifetime` function::
...
hbonds.run()
tau_timeseries, timeseries = hbonds.lifetime()
It is **highly recommended** that a topology with bond information is used to
generate the universe, e.g. `PSF`, `TPR`, or `PRMTOP` files. This is the only
method by which it can be guaranteed that donor-hydrogen pairs are correctly
identified. However, if, for example, a `PDB` file is used instead, a
:attr:`donors_sel` may be provided along with a :attr:`hydrogens_sel` and the
donor-hydrogen pairs will be identified via a distance cutoff,
:attr:`d_h_cutoff`::
import MDAnalysis
from MDAnalysis.analysis.hydrogenbonds.hbond_analysis import (
HydrogenBondAnalysis as HBA)
u = MDAnalysis.Universe(pdb, trajectory)
hbonds = HBA(
universe=u,
donors_sel='resname TIP3 and name OH2',
hydrogens_sel='resname TIP3 and name H1 H2',
acceptors_sel='resname TIP3 and name OH2',
d_h_cutoff=1.2
)
hbonds.run()
The class and its methods
-------------------------
.. autoclass:: HydrogenBondAnalysis
:members:
.. attribute:: results.hbonds
A :class:`numpy.ndarray` which contains a list of all observed hydrogen
bond interactions. See `Output`_ for more information.
.. versionadded:: 2.0.0
.. attribute:: hbonds
Alias to the :attr:`results.hbonds` attribute.
.. deprecated:: 2.0.0
Will be removed in MDAnalysis 3.0.0. Please use
:attr:`results.hbonds` instead.
"""
import logging
import warnings
from collections.abc import Iterable
import numpy as np
from ..base import AnalysisBase, Results
from MDAnalysis.lib.distances import capped_distance, calc_angles
from MDAnalysis.lib.correlations import autocorrelation, correct_intermittency
from MDAnalysis.exceptions import NoDataError
from MDAnalysis.core.groups import AtomGroup
from ...due import due, Doi
due.cite(Doi("10.1039/C9CP01532A"),
description="Hydrogen bond analysis implementation",
path="MDAnalysis.analysis.hydrogenbonds.hbond_analysis",
cite_module=True)
del Doi
class HydrogenBondAnalysis(AnalysisBase):
"""
Perform an analysis of hydrogen bonds in a Universe.
"""
def __init__(self, universe,
donors_sel=None, hydrogens_sel=None, acceptors_sel=None,
between=None, d_h_cutoff=1.2,
d_a_cutoff=3.0, d_h_a_angle_cutoff=150,
update_selections=True):
"""Set up atom selections and geometric criteria for finding hydrogen
bonds in a Universe.
Parameters
----------
universe : Universe
MDAnalysis Universe object
donors_sel : str
Selection string for the hydrogen bond donor atoms. If the
universe topology contains bonding information, leave
:attr:`donors_sel` as `None` so that donor-hydrogen pairs can be
correctly identified.
hydrogens_sel : str
Selection string for the hydrogen bond hydrogen atoms. Leave as
`None` to guess which hydrogens to use in the analysis using
:attr:`guess_hydrogens`. If :attr:`hydrogens_sel` is left as
`None`, also leave :attr:`donors_sel` as None so that
donor-hydrogen pairs can be correctly identified.
acceptors_sel : str
Selection string for the hydrogen bond acceptor atoms. Leave as
`None` to guess which atoms to use in the analysis using
:attr:`guess_acceptors`
between : List (optional),
Specify two selection strings for non-updating atom groups between
which hydrogen bonds will be calculated. For example, if the donor
and acceptor selections include both protein and water, it is
possible to find only protein-water hydrogen bonds - and not
protein-protein or water-water - by specifying
`between=["protein", "SOL"]`. If a two-dimensional list is
passed, hydrogen bonds between each pair will be found. For
example, `between=[["protein", "SOL"], ["protein", "protein"]]`
will calculate all protein-water and protein-protein hydrogen
bonds but not water-water hydrogen bonds. If `None`, hydrogen
bonds between all donors and acceptors will be calculated.
d_h_cutoff : float (optional)
Distance cutoff used for finding donor-hydrogen pairs.
Only used to find donor-hydrogen pairs if the
universe topology does not contain bonding information
d_a_cutoff : float (optional)
Distance cutoff for hydrogen bonds. This cutoff refers to the D-A distance.
d_h_a_angle_cutoff : float (optional)
D-H-A angle cutoff for hydrogen bonds, in degrees.
update_selections : bool (optional)
Whether or not to update the acceptor, donor and hydrogen
lists at each frame.
Note
----
It is highly recommended that a universe topology with bond
information is used, as this is the only way that guarantees the
correct identification of donor-hydrogen pairs.
.. versionadded:: 2.0.0
Added `between` keyword
"""
self.u = universe
self._trajectory = self.u.trajectory
self.donors_sel = donors_sel.strip() if donors_sel is not None else donors_sel
self.hydrogens_sel = hydrogens_sel.strip() if hydrogens_sel is not None else hydrogens_sel
self.acceptors_sel = acceptors_sel.strip() if acceptors_sel is not None else acceptors_sel
msg = ("{} is an empty selection string - no hydrogen bonds will "
"be found. This may be intended, but please check your "
"selection."
)
for sel in ['donors_sel', 'hydrogens_sel', 'acceptors_sel']:
val = getattr(self, sel)
if isinstance(val, str) and not val:
warnings.warn(msg.format(sel))
# If hydrogen bonding groups are selected, then generate
# corresponding atom groups
if between is not None:
if not isinstance(between, Iterable) or len(between) == 0:
raise ValueError("between must be a non-empty list/iterable")
if isinstance(between[0], str):
between = [between]
between_ags = []
for group1, group2 in between:
between_ags.append(
[
self.u.select_atoms(group1, updating=False),
self.u.select_atoms(group2, updating=False)
]
)
self.between_ags = between_ags
else:
self.between_ags = None
self.d_h_cutoff = d_h_cutoff
self.d_a_cutoff = d_a_cutoff
self.d_h_a_angle = d_h_a_angle_cutoff
self.update_selections = update_selections
self.results = Results()
self.results.hbonds = None
def guess_hydrogens(self,
select='all',
max_mass=1.1,
min_charge=0.3,
min_mass=0.9
):
"""Guesses which hydrogen atoms should be used in the analysis.
Parameters
----------
select: str (optional)
Selection string for atom group from which hydrogens will be identified.
max_mass: float (optional)
Maximum allowed mass of a hydrogen atom.
min_charge: float (optional)
Minimum allowed charge of a hydrogen atom.
min_mass: float (optional)
Minimum allowed mass of a hydrogen atom.
Returns
-------
potential_hydrogens: str
String containing the :attr:`resname` and :attr:`name` of all hydrogen atoms potentially capable of forming
hydrogen bonds.
Notes
-----
This function makes use of atomic masses and atomic charges to identify which atoms are hydrogen atoms that are
capable of participating in hydrogen bonding. If an atom has a mass less than :attr:`max_mass` and an atomic
charge greater than :attr:`min_charge` then it is considered capable of participating in hydrogen bonds.
If :attr:`hydrogens_sel` is `None`, this function is called to guess the selection.
Alternatively, this function may be used to quickly generate a :class:`str` of potential hydrogen atoms involved
in hydrogen bonding. This str may then be modified before being used to set the attribute
:attr:`hydrogens_sel`.
"""
if min_mass > max_mass:
raise ValueError("min_mass is higher than (or equal to) max_mass")
ag = self.u.select_atoms(select)
hydrogens_ag = ag[
np.logical_and.reduce((
ag.masses < max_mass,
ag.charges > min_charge,
ag.masses > min_mass,
))
]
hydrogens_list = np.unique(
[
'(resname {} and name {})'.format(r, p) for r, p in zip(hydrogens_ag.resnames, hydrogens_ag.names)
]
)
return " or ".join(hydrogens_list)
def guess_donors(self, select='all', max_charge=-0.5):
"""Guesses which atoms could be considered donors in the analysis. Only use if the universe topology does not
contain bonding information, otherwise donor-hydrogen pairs may be incorrectly assigned.
Parameters
----------
select: str (optional)
Selection string for atom group from which donors will be identified.
max_charge: float (optional)
Maximum allowed charge of a donor atom.
Returns
-------
potential_donors: str
String containing the :attr:`resname` and :attr:`name` of all atoms potentially capable of forming
hydrogen bonds.
Notes
-----
This function makes use of atomic charges to identify which atoms could be considered donor atoms in the
hydrogen bond analysis. If an atom has an atomic charge less than :attr:`max_charge`, and it is within
:attr:`d_h_cutoff` of a hydrogen atom, then it is considered capable of participating in hydrogen bonds.
If :attr:`donors_sel` is `None`, and the universe topology does not have bonding information, this function is
called to guess the selection.
Alternatively, this function may be used to quickly generate a :class:`str` of potential donor atoms involved
in hydrogen bonding. This :class:`str` may then be modified before being used to set the attribute
:attr:`donors_sel`.
"""
# We need to know `hydrogens_sel` before we can find donors
# Use a new variable `hydrogens_sel` so that we do not set `self.hydrogens_sel` if it is currently `None`
if self.hydrogens_sel is None:
hydrogens_sel = self.guess_hydrogens()
else:
hydrogens_sel = self.hydrogens_sel
hydrogens_ag = self.u.select_atoms(hydrogens_sel)
ag = hydrogens_ag.residues.atoms.select_atoms(
"({donors_sel}) and around {d_h_cutoff} {hydrogens_sel}".format(
donors_sel=select,
d_h_cutoff=self.d_h_cutoff,
hydrogens_sel=hydrogens_sel
)
)
donors_ag = ag[ag.charges < max_charge]
donors_list = np.unique(
[
'(resname {} and name {})'.format(r, p) for r, p in zip(donors_ag.resnames, donors_ag.names)
]
)
return " or ".join(donors_list)
def guess_acceptors(self, select='all', max_charge=-0.5):
"""Guesses which atoms could be considered acceptors in the analysis.
Parameters
----------
select: str (optional)
Selection string for atom group from which acceptors will be identified.
max_charge: float (optional)
Maximum allowed charge of an acceptor atom.
Returns
-------
potential_acceptors: str
String containing the :attr:`resname` and :attr:`name` of all atoms potentially capable of forming
hydrogen bonds.
Notes
-----
This function makes use of atomic charges to identify which atoms could be considered acceptor atoms in the
hydrogen bond analysis. If an atom has an atomic charge less than :attr:`max_charge` then it is considered
capable of participating in hydrogen bonds.
If :attr:`acceptors_sel` is `None`, this function is called to guess the selection.
Alternatively, this function may be used to quickly generate a :class:`str` of potential acceptor atoms involved
in hydrogen bonding. This :class:`str` may then be modified before being used to set the attribute
:attr:`acceptors_sel`.
"""
ag = self.u.select_atoms(select)
acceptors_ag = ag[ag.charges < max_charge]
acceptors_list = np.unique(
[
'(resname {} and name {})'.format(r, p) for r, p in zip(acceptors_ag.resnames, acceptors_ag.names)
]
)
return " or ".join(acceptors_list)
def _get_dh_pairs(self):
"""Finds donor-hydrogen pairs.
Returns
-------
donors, hydrogens: AtomGroup, AtomGroup
AtomGroups corresponding to all donors and all hydrogens. AtomGroups are ordered such that, if zipped, will
produce a list of donor-hydrogen pairs.
"""
# If donors_sel is not provided, use topology to find d-h pairs
if self.donors_sel is None:
# We're using u._topology.bonds rather than u.bonds as it is a million times faster to access.
# This is because u.bonds also calculates properties of each bond (e.g bond length).
# See https://github.com/MDAnalysis/mdanalysis/issues/2396#issuecomment-596251787
if not (hasattr(self.u._topology, 'bonds') and len(self.u._topology.bonds.values) != 0):
raise NoDataError('Cannot assign donor-hydrogen pairs via topology as no bond information is present. '
'Please either: load a topology file with bond information; use the guess_bonds() '
'topology guesser; or set HydrogenBondAnalysis.donors_sel so that a distance cutoff '
'can be used.')
hydrogens = self.u.select_atoms(self.hydrogens_sel)
donors = sum(h.bonded_atoms[0] for h in hydrogens) if hydrogens \
else AtomGroup([], self.u)
# Otherwise, use d_h_cutoff as a cutoff distance
else:
hydrogens = self.u.select_atoms(self.hydrogens_sel)
donors = self.u.select_atoms(self.donors_sel)
donors_indices, hydrogen_indices = capped_distance(
donors.positions,
hydrogens.positions,
max_cutoff=self.d_h_cutoff,
box=self.u.dimensions,
return_distances=False
).T
donors = donors[donors_indices]
hydrogens = hydrogens[hydrogen_indices]
return donors, hydrogens
def _filter_atoms(self, donors, hydrogens, acceptors):
"""Filter donor, hydrogen and acceptor atoms to consider only hydrogen
bonds between two or more specified groups.
Groups are specified with the `between` keyword when creating the
HydrogenBondAnalysis object.
Returns
-------
donors, hydrogens, acceptors: Filtered AtomGroups
"""
mask = np.full(donors.n_atoms, fill_value=False)
for group1, group2 in self.between_ags:
# Find donors in G1 and acceptors in G2
mask[
np.logical_and(
np.in1d(donors.indices, group1.indices),
np.in1d(acceptors.indices, group2.indices)
)
] = True
# Find acceptors in G1 and donors in G2
mask[
np.logical_and(
np.in1d(acceptors.indices, group1.indices),
np.in1d(donors.indices, group2.indices)
)
] = True
return donors[mask], hydrogens[mask], acceptors[mask]
def _prepare(self):
self.results.hbonds = [[], [], [], [], [], []]
# Set atom selections if they have not been provided
if self.acceptors_sel is None:
self.acceptors_sel = self.guess_acceptors()
if self.hydrogens_sel is None:
self.hydrogens_sel = self.guess_hydrogens()
# Select atom groups
self._acceptors = self.u.select_atoms(self.acceptors_sel,
updating=self.update_selections)
self._donors, self._hydrogens = self._get_dh_pairs()
def _single_frame(self):
box = self._ts.dimensions
# Update donor-hydrogen pairs if necessary
if self.update_selections:
self._donors, self._hydrogens = self._get_dh_pairs()
# find D and A within cutoff distance of one another
# min_cutoff = 1.0 as an atom cannot form a hydrogen bond with itself
d_a_indices, d_a_distances = capped_distance(
self._donors.positions,
self._acceptors.positions,
max_cutoff=self.d_a_cutoff,
min_cutoff=1.0,
box=box,
return_distances=True,
)
# Remove D-A pairs more than d_a_cutoff away from one another
tmp_donors = self._donors[d_a_indices.T[0]]
tmp_hydrogens = self._hydrogens[d_a_indices.T[0]]
tmp_acceptors = self._acceptors[d_a_indices.T[1]]
# Remove donor-acceptor pairs between pairs of AtomGroups we are not
# interested in
if self.between_ags is not None:
tmp_donors, tmp_hydrogens, tmp_acceptors = \
self._filter_atoms(tmp_donors, tmp_hydrogens, tmp_acceptors)
# Find D-H-A angles greater than d_h_a_angle_cutoff
d_h_a_angles = np.rad2deg(
calc_angles(
tmp_donors.positions,
tmp_hydrogens.positions,
tmp_acceptors.positions,
box=box
)
)
hbond_indices = np.where(d_h_a_angles > self.d_h_a_angle)[0]
# Retrieve atoms, distances and angles of hydrogen bonds
hbond_donors = tmp_donors[hbond_indices]
hbond_hydrogens = tmp_hydrogens[hbond_indices]
hbond_acceptors = tmp_acceptors[hbond_indices]
hbond_distances = d_a_distances[hbond_indices]
hbond_angles = d_h_a_angles[hbond_indices]
# Store data on hydrogen bonds found at this frame
self.results.hbonds[0].extend(np.full_like(hbond_donors,
self._ts.frame))
self.results.hbonds[1].extend(hbond_donors.indices)
self.results.hbonds[2].extend(hbond_hydrogens.indices)
self.results.hbonds[3].extend(hbond_acceptors.indices)
self.results.hbonds[4].extend(hbond_distances)
self.results.hbonds[5].extend(hbond_angles)
def _conclude(self):
self.results.hbonds = np.asarray(self.results.hbonds).T
@property
def hbonds(self):
wmsg = ("The `hbonds` attribute was deprecated in MDAnalysis 2.0.0 "
"and will be removed in MDAnalysis 3.0.0. Please use "
"`results.hbonds` instead.")
warnings.warn(wmsg, DeprecationWarning)
return self.results.hbonds
def lifetime(self, tau_max=20, window_step=1, intermittency=0):
"""Computes and returns the time-autocorrelation
(HydrogenBondLifetimes) of hydrogen bonds.
Before calling this method, the hydrogen bonds must first be computed
with the `run()` function. The same `start`, `stop` and `step`
parameters used in finding hydrogen bonds will be used here for
calculating hydrogen bond lifetimes. That is, the same frames will be
used in the analysis.
Unique hydrogen bonds are identified using hydrogen-acceptor pairs.
This means an acceptor switching to a different hydrogen atom - with
the same donor - from one frame to the next is considered a different
hydrogen bond.
Please see :func:`MDAnalysis.lib.correlations.autocorrelation` and
:func:`MDAnalysis.lib.correlations.intermittency` functions for more
details.
Parameters
----------
window_step : int, optional
The number of frames between each t(0).
tau_max : int, optional
Hydrogen bond lifetime is calculated for frames in the range
1 <= `tau` <= `tau_max`
intermittency : int, optional
The maximum number of consecutive frames for which a bond can
disappear but be counted as present if it returns at the next
frame. An intermittency of `0` is equivalent to a continuous
autocorrelation, which does not allow for hydrogen bond
disappearance. For example, for `intermittency=2`, any given
hydrogen bond may disappear for up to two consecutive frames yet
be treated as being present at all frames. The default is
continuous (intermittency=0).
Returns
-------
tau_timeseries : np.array
tau from 1 to `tau_max`
timeseries : np.array
autocorrelation value for each value of `tau`
"""
if self.results.hbonds is None:
logging.error(
"Autocorrelation analysis of hydrogen bonds cannot be done"
"before the hydrogen bonds are found"
)
logging.error(
"Autocorrelation: Please use the .run() before calling this"
"function"
)
raise NoDataError(".hbonds attribute is None: use .run() first")
if self.step != 1:
logging.warning(
"Autocorrelation: Hydrogen bonds were computed with step > 1."
)
logging.warning(
"Autocorrelation: We recommend recomputing hydrogen bonds with"
" step = 1."
)
logging.warning(
"Autocorrelation: if you would like to allow bonds to break"
" and reform, please use 'intermittency'"
)
# Extract the hydrogen bonds IDs only in the format
# [set(superset(x1,x2), superset(x3,x4)), ..]
found_hydrogen_bonds = [set() for _ in self.frames]
for frame_index, frame in enumerate(self.frames):
for hbond in self.results.hbonds[self.results.hbonds[:, 0] == frame]:
found_hydrogen_bonds[frame_index].add(frozenset(hbond[2:4]))
intermittent_hbonds = correct_intermittency(
found_hydrogen_bonds,
intermittency=intermittency
)
tau_timeseries, timeseries, timeseries_data = autocorrelation(
intermittent_hbonds,
tau_max,
window_step=window_step
)
return np.vstack([tau_timeseries, timeseries])
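# Hedged usage sketch: allow each bond to vanish for up to two consecutive
# frames while still counting as intact (see `intermittency` above):
#   tau_frames, autocorr = hbonds.lifetime(tau_max=25, intermittency=2)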
def count_by_time(self):
"""Counts the number of hydrogen bonds per timestep.
Returns
-------
counts : numpy.ndarray
Contains the total number of hydrogen bonds found at each timestep.
Can be used along with :attr:`HydrogenBondAnalysis.times` to plot
the number of hydrogen bonds over time.
"""
indices, tmp_counts = np.unique(self.results.hbonds[:, 0], axis=0,
return_counts=True)
indices -= self.start
indices /= self.step
counts = np.zeros_like(self.frames)
counts[indices.astype(np.intp)] = tmp_counts
return counts
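# Hedged plotting sketch (matplotlib is assumed and not imported here):
#   import matplotlib.pyplot as plt
#   plt.plot(hbonds.times, hbonds.count_by_time(), lw=2)
#   plt.xlabel("Time"); plt.ylabel("Number of hydrogen bonds")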
def count_by_type(self):
"""Counts the total number of each unique type of hydrogen bond.
Returns
-------
counts : numpy.ndarray
Each row of the array contains the donor resname, donor atom type, acceptor resname, acceptor atom type and
the total number of times the hydrogen bond was found.
Note
----
Unique hydrogen bonds are determined through a consideration of the resname and atom type of the donor and
acceptor atoms in a hydrogen bond.
"""
d = self.u.atoms[self.hbonds[:, 1].astype(np.intp)]
a = self.u.atoms[self.hbonds[:, 3].astype(np.intp)]
tmp_hbonds = np.array([d.resnames, d.types, a.resnames, a.types],
dtype=str).T
hbond_type, type_counts = np.unique(
tmp_hbonds, axis=0, return_counts=True)
hbond_type_list = []
for hb_type, hb_count in zip(hbond_type, type_counts):
hbond_type_list.append([":".join(hb_type[:2]),
":".join(hb_type[2:4]), hb_count])
return np.array(hbond_type_list)
def count_by_ids(self):
"""Counts the total number hydrogen bonds formed by unique combinations of donor, hydrogen and acceptor atoms.
Returns
-------
counts : numpy.ndarray
Each row of the array contains the donor atom id, hydrogen atom id, acceptor atom id and the total number
of times the hydrogen bond was observed. The array is sorted by frequency of occurrence.
Note
----
Unique hydrogen bonds are determined through a consideration of the hydrogen atom id and acceptor atom id
in a hydrogen bond.
"""
d = self.u.atoms[self.hbonds[:, 1].astype(np.intp)]
h = self.u.atoms[self.hbonds[:, 2].astype(np.intp)]
a = self.u.atoms[self.hbonds[:, 3].astype(np.intp)]
tmp_hbonds = np.array([d.ids, h.ids, a.ids]).T
hbond_ids, ids_counts = np.unique(tmp_hbonds, axis=0,
return_counts=True)
# Find unique hbonds and sort rows so that most frequent observed bonds are at the top of the array
unique_hbonds = np.concatenate((hbond_ids, ids_counts[:, None]),
axis=1)
unique_hbonds = unique_hbonds[unique_hbonds[:, 3].argsort()[::-1]]
return unique_hbonds
| MDAnalysis/mdanalysis | package/MDAnalysis/analysis/hydrogenbonds/hbond_analysis.py | Python | gpl-2.0 | 33,580 | ["MDAnalysis", "VMD"] | a65207c6a7367e3bf391b2022c805500d37925e08db64ee664b8ee828bd1154a |
"""
Functional GUI tests: run_basic() and run_detailed().
"""
import copy
from numpy import array
from numpy.testing import assert_array_equal
import topo
import topo.tests.functionaltest as ft
from topo.tests.utils import assert_array_not_equal
from nose.tools import nottest
def run_basic():
"""Check that the windows all open ok (i.e. is GUI functioning?)."""
_initialize()
s = 'Simulation'
p = 'Plots'
menu_paths = [ (s,'Test Pattern'),
(s,'Model Editor'),
(p,'Activity'),
(p,'Connection Fields'),
(p,'Projection'),
(p,'Projection Activity'),
(p,'Preference Maps','Orientation Preference'),
(p,'Tuning Curves','Orientation Tuning') ]
return ft.run([_menu_item_fn(*x) for x in menu_paths],"Running basic GUI tests...")
def run_detailed():
"""Test that more complex GUI actions are working."""
_initialize()
tests = [test_cf_coords,test_test_pattern,
test_projection,test_orientation_tuning] # and so on...
return ft.run(tests,"Running detailed GUI tests...")
######################################################################
# DETAILED TESTS
######################################################################
@nottest
def test_cf_coords():
"""Check that ConnectionFields window opens with specified coords."""
cf = topo.guimain['Plots']['Connection Fields'](x=0.125,y=0.250)
assert cf.x==0.125
assert cf.y==0.250
@nottest
def test_test_pattern():
"""Check that test pattern window is working."""
tp = topo.guimain['Simulation']['Test Pattern']()
act = topo.guimain['Plots']['Activity']()
tp.gui_set_param('edit_sheet','GS')
## Test for state_push bug (simulation not run() before Present pressed)
assert len(topo.sim.eps_to_start)>0, "test must be run before simulation is run()"
from imagen import Gaussian
from topo import numbergen
topo.sim['GS'].set_input_generator(Gaussian(x=numbergen.UniformRandom()))
tp.Present()
topo.sim.run(1)
act1 = copy.deepcopy(topo.sim['GS'].activity)
topo.sim.run(2)
assert_array_not_equal(topo.sim['GS'].activity,act1,"GeneratorSheet no longer generating patterns")
##
tp.gui_set_param('pattern_generator','TwoRectangles')
from imagen.deprecated import TwoRectangles
assert isinstance(tp.pattern_generator,TwoRectangles), "Pattern generator did not change."
preview = _get_named_plot('GS',tp.plotgroup.plots).view_dict.get('Strength',{})['Activity'].last.data
two_rectangles = array([[0.,1],[1.,0.]])
assert_array_equal(preview,two_rectangles,"Incorrect pattern in preview plot.")
tp.Present()
gs_view = _get_named_plot('GS',act.plotgroup.plots).view_dict.get('Strength',{})['Activity']
assert gs_view.metadata.src_name=='GS'
gs_plot_array = gs_view.last.data
assert_array_equal(gs_plot_array,two_rectangles,"Incorrect pattern in activity plot after Present.")
tp.params_frame.gui_set_param('scale',0.5)
preview = _get_named_plot('GS',tp.plotgroup.plots).view_dict.get('Strength',{})['Activity'].last.data
assert_array_equal(preview,0.5*two_rectangles,"Changing pattern parameters did not update preview.")
### Defaults button
# first change several more parameters
initial_preview = tp.plotgroup.plots[0].view_dict.get('Strength',{})['Activity'].last.data
new_param_values = [#('output_fns','Sigmoid'),
('scale','2')]
for name,value in new_param_values:
tp.params_frame.gui_set_param(name,value)
changed_preview = _get_named_plot('GS',tp.plotgroup.plots).view_dict.get('Strength',{})['Activity'].last.data
# and check the preview did change
try:
assert_array_equal(changed_preview,initial_preview)
except AssertionError:
pass
else:
raise AssertionError("Test pattern didn't change.")
# test that preview display is correct
tp.params_frame.Defaults()
preview = _get_named_plot('GS',tp.plotgroup.plots).view_dict.get('Strength',{})['Activity'].last.data
assert_array_equal(preview,two_rectangles,"Defaults button failed to revert params to default values.")
# CB: still need to test duration, learning, etc
@nottest
def test_projection():
"""Check the Projection window."""
p = topo.guimain['Plots']['Projection']()
p.gui_set_param('sheet','S')
p.gui_set_param('projection','GSToS')
p.gui_set_param('sheet','S2')
p.gui_set_param('projection','GS2ToS2')
p.gui_set_param('projection','GSToS2')
@nottest
def test_orientation_tuning():
"""Check that orientation tuning plot works."""
p = topo.guimain['Plots']['Tuning Curves']['Orientation Tuning']()
from topo.command.analysis import measure_or_tuning
p.pre_plot_hooks = [measure_or_tuning.instance(num_phase=1,num_orientation=1,contrasts=[30])]
p.Refresh()
######################################################################
# UTILITY FUNCTIONS
######################################################################
# make these particular tests simpler
def _initialize():
"""Make a simple simulation."""
from topo.base.simulation import Simulation
from topo.base.cf import CFSheet,CFProjection
from topo.sheet import GeneratorSheet
sim=Simulation(register=True,name="test pattern tester")
sim['GS']=GeneratorSheet(nominal_density=2)
sim['GS2']=GeneratorSheet(nominal_density=2)
sim['S'] = CFSheet(nominal_density=2)
sim['S2'] = CFSheet(nominal_density=2)
sim.connect('GS','S',connection_type=CFProjection,delay=0.05)
sim.connect('GS','S2',connection_type=CFProjection,delay=0.05)
sim.connect('GS2','S2',connection_type=CFProjection,delay=0.05)
def _menu_item_fn(*clicks):
"""Return a wrapper round topo.guimain[click1][click2]...[clickN], with __doc__ set
to the menu path."""
menu_path = 'topo.guimain'
for click in clicks:
menu_path+='["%s"]'%click
menu_item = eval(menu_path)
def test_menu_item():
menu_item()
test_menu_item.__doc__ = menu_path[12:]  # strip the leading 'topo.guimain'
return test_menu_item
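# Hedged usage sketch: _menu_item_fn('Plots', 'Activity') returns a no-argument
# callable that opens the Activity window when invoked; its __doc__ is the menu
# path with the leading 'topo.guimain' stripped, i.e. '["Plots"]["Activity"]'.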
def _get_named_plot(name,plots):
for plot in plots:
if plot.plot_src_name==name:
return plot
assert False
| ioam/topographica | topo/tests/gui_tests.py | Python | bsd-3-clause | 6,347 | ["Gaussian"] | 6e804b00d5d3bf4398364c94a9ade7d5662f19ca54d2eda4a75ecf1c7e29596c |
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = os.path.join('meta', 'main.yml')
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests')
def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
self._metadata = None
self._install_info = None
self.options = galaxy.options
self.galaxy = galaxy
self.name = name
self.version = version
self.src = src or name
self.scm = scm
if path is not None:
if self.name not in path:
path = os.path.join(path, self.name)
self.path = path
else:
for path in galaxy.roles_paths:
role_path = os.path.join(path, self.name)
if os.path.exists(role_path):
self.path = role_path
break
else:
# use the first path by default
self.path = os.path.join(galaxy.roles_paths[0], self.name)
def __eq__(self, other):
return self.name == other.name
@property
def metadata(self):
"""
Returns role metadata
"""
if self._metadata is None:
meta_path = os.path.join(self.path, self.META_MAIN)
if os.path.isfile(meta_path):
try:
f = open(meta_path, 'r')
self._metadata = yaml.safe_load(f)
except:
display.vvvvv("Unable to load metadata for %s" % self.name)
return False
finally:
f.close()
return self._metadata
@property
def install_info(self):
"""
Returns role install info
"""
if self._install_info is None:
info_path = os.path.join(self.path, self.META_INSTALL)
if os.path.isfile(info_path):
try:
f = open(info_path, 'r')
self._install_info = yaml.safe_load(f)
except:
display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
return False
finally:
f.close()
return self._install_info
def _write_galaxy_install_info(self):
"""
Writes a YAML-formatted file to the role's meta/ directory
(named .galaxy_install_info) which contains some information
we can use later for commands like 'list' and 'info'.
"""
info = dict(
version=self.version,
install_date=datetime.datetime.utcnow().strftime("%c"),
)
info_path = os.path.join(self.path, self.META_INSTALL)
with open(info_path, 'w+') as f:
try:
self._install_info = yaml.safe_dump(info, f)
except:
return False
return True
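# Illustrative content of the resulting meta/.galaxy_install_info file
# (assumed output; yaml.safe_dump emits the keys alphabetically):
#   install_date: Mon Oct  3 12:00:00 2016
#   version: v1.0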
def remove(self):
"""
Removes the specified role from the roles path.
There is a sanity check to make sure there's a meta/main.yml file at this
path so the user doesn't blow away random directories.
"""
if self.metadata:
try:
rmtree(self.path)
return True
except:
pass
return False
def fetch(self, role_data):
"""
Downloads the archived role from github to a temp location
"""
if role_data:
# first grab the file and save it to a temp location
if "github_user" in role_data and "github_repo" in role_data:
archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
else:
archive_url = self.src
display.display("- downloading role from %s" % archive_url)
try:
url_file = open_url(archive_url)
temp_file = tempfile.NamedTemporaryFile(delete=False)
data = url_file.read()
while data:
temp_file.write(data)
data = url_file.read()
temp_file.close()
return temp_file.name
except Exception as e:
display.error("failed to download the file: %s" % str(e))
return False
def install(self):
# the file is a tar, so open it that way and extract it
# to the specified (or default) roles directory
if self.scm:
# create tar file from scm url
tmp_file = RoleRequirement.scm_archive_role(**self.spec)
elif self.src:
if os.path.isfile(self.src):
# installing a local tar.gz
tmp_file = self.src
elif '://' in self.src:
role_data = self.src
tmp_file = self.fetch(role_data)
else:
api = GalaxyAPI(self.galaxy)
role_data = api.lookup_role_by_name(self.src)
if not role_data:
raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))
role_versions = api.fetch_role_related('versions', role_data['id'])
if not self.version:
# convert the version names to LooseVersion objects
# and sort them to get the latest version. If there
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
loose_versions.sort()
self.version = str(loose_versions[-1])
elif role_data.get('github_branch', None):
self.version = role_data['github_branch']
else:
self.version = 'master'
elif self.version != 'master':
if role_versions and self.version not in [a.get('name', None) for a in role_versions]:
raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions))
tmp_file = self.fetch(role_data)
else:
raise AnsibleError("No valid role data found")
if tmp_file:
display.debug("installing from %s" % tmp_file)
if not tarfile.is_tarfile(tmp_file):
raise AnsibleError("the file downloaded was not a tar.gz")
else:
if tmp_file.endswith('.gz'):
role_tar_file = tarfile.open(tmp_file, "r:gz")
else:
role_tar_file = tarfile.open(tmp_file, "r")
# verify the role's meta file
meta_file = None
members = role_tar_file.getmembers()
# next find the metadata file
for member in members:
if self.META_MAIN in member.name:
meta_file = member
break
if not meta_file:
raise AnsibleError("this role does not appear to have a meta/main.yml file.")
else:
try:
self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
except:
raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
# we strip off the top-level directory for all of the files contained within
# the tar file here, since the default is 'github_repo-target', and change it
# to the specified role's name
display.display("- extracting %s to %s" % (self.name, self.path))
try:
if os.path.exists(self.path):
if not os.path.isdir(self.path):
raise AnsibleError("the specified roles path exists and is not a directory.")
elif not getattr(self.options, "force", False):
raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
else:
# using --force, remove the old path
if not self.remove():
raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really want to put the role here." % self.path)
else:
os.makedirs(self.path)
# now we do the actual extraction to the path
for member in members:
# we only extract files, and remove any relative path
# bits that might be in the file for security purposes
# and drop the leading directory, as mentioned above
if member.isreg() or member.issym():
parts = member.name.split(os.sep)[1:]
final_parts = []
for part in parts:
if part != '..' and '~' not in part and '$' not in part:
final_parts.append(part)
member.name = os.path.join(*final_parts)
role_tar_file.extract(member, self.path)
# write out the install info file for later use
self._write_galaxy_install_info()
except OSError as e:
raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e)))
# return the parsed yaml metadata
display.display("- %s was installed successfully" % self.name)
try:
os.unlink(tmp_file)
except (OSError,IOError) as e:
display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e)))
return True
return False
@property
def spec(self):
"""
Returns role spec info
{
'scm': 'git',
'src': 'http://git.example.com/repos/repo.git',
'version': 'v1.0',
'name': 'repo'
}
"""
return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
| hfinucane/ansible | lib/ansible/galaxy/role.py | Python | gpl-3.0 | 12,164 | ["Brian", "Galaxy"] | f9ca892e9c29f917e6a06b94b51d81ea313823ddaad0cf9978807425031a4a03 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) cgstudiomap.org <cgstudiomap@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Res Partner Count Iframe Host',
'version': 'beta',
'author': 'cgstudiomap',
'maintainer': 'cgstudiomap',
'license': 'AGPL-3',
'category': 'Main',
'summary': 'Let the host see whether a visit comes from an iframe',
'depends': [
'res_partner_count',
'website_iframe_host',
],
'external_dependencies': {
},
'data': [],
'installable': True,
}
| cgstudiomap/cgstudiomap | main/local_modules/res_partner_count_iframe_host/__openerp__.py | Python | agpl-3.0 | 1,424 | ["VisIt"] | 7574e41090dafde890f1f889d54923bdd0af014a3d54a2c6530d7396729be639 |
#http://www.raspberrypi-spy.co.uk/2012/05/send-text-and-html-email-using-python/
# Import smtplib to provide email functions
import smtplib
# Import the email modules
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import MySQLdb as mdb
# Define email addresses to use
con = mdb.connect('localhost', 'root', 'pi', 'pop_PI');
with con:
cur = con.cursor()
# NOTE: `RFID` is expected to be defined by the surrounding context.
# fetchone() returns a one-column tuple (or None), so unpack the value.
cur.execute("SELECT Email from MemberAccount where RFID = '%s'" % RFID)
row = cur.fetchone()
addr_to = row[0] if row else None
cur.execute("select Account from MemberAccount where RFID = '%s'" % RFID)
balance = cur.fetchone()[0] # retrieve balance from the account
addr_from = 'pop.pi@ents.ca'
if addr_to:
# Define SMTP email server details
smtp_server = 'smtp.gmail.com:587'
smtp_user = 'pop.pi@ents.ca'
smtp_pass = 'knockknockknockpenny'
# Construct email
msg = MIMEMultipart('alternative')
msg['To'] = addr_to
msg['From'] = addr_from
msg['Subject'] = 'Test Email From RPi'
# Create the body of the message (a plain-text and an HTML version).
text = "This is a test message.\nText and html."
html = """\
<html>
<head></head>
<body>
<h1>Your Account Balance Is.</h1>
<p><h1>%s</h1></p>
<p>To top up your account visit poppi.ents.ca</p>
</body>
</html>
""" % balance
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
msg.attach(part1)
msg.attach(part2)
# Send the message via an SMTP server
s = smtplib.SMTP(smtp_server)
s.starttls()
s.login(smtp_user,smtp_pass)
s.sendmail(addr_from, addr_to, msg.as_string())
s.quit()
if con:
con.close()
| renderlab/ENTS-Pop-Pi | pop-PI/EmailTest.py | Python | gpl-2.0 | 1,849 | ["VisIt"] | 11b46398ec60cda3e8b200ea6eaefddf6a43f341c00caabac2661570b790c883 |
"""
Suppose a casino invents a new game that you must pay $250 to play. The game
works like this: The casino draws random numbers between 0 and 1, from a
uniform distribution. It adds them together until their sum is greater than 1,
at which time it stops drawing new numbers. You get a payout of $100 each time
a new number is drawn.
For example, suppose the casino draws 0.4 and then 0.7. Since the sum is
greater than 1, it will stop after these two draws, and you receive $200. If
instead it draws 0.2, 0.3, 0.3, and then 0.6, it will stop after the fourth
draw and you will receive $400. Given the $250 entrance fee, should you play
the game?
Specifically, what is the expected value of your winnings?
From:
http://fivethirtyeight.com/features/should-you-pay-250-to-play-this-casino-game
"""
import numpy as np
def trial():
total = 0
spins = 0
while total < 1:
total += np.random.random()
spins += 1
return spins
def main():
    n = 10000000
    # Average number of draws per game, times the $100 payout per draw
    mean_draws = np.mean([trial() for _ in range(n)])
    expected_winnings = 100 * mean_draws
    print(expected_winnings)
if __name__ == '__main__':
main()
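# Analytic check (not part of the original script): the expected number of
# Uniform(0, 1) draws needed for the running sum to exceed 1 is e, so the
# expected payout is $100 * e ~= $271.83, comfortably above the $250 fee.
#
#     import math
#     print(100 * math.e)  # 271.8281828...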
|
noelevans/sandpit
|
fivethiryeight/riddler_casino.py
|
Python
|
mit
| 1,180
|
[
"CASINO"
] |
699b41230be106aa3cd140c73d6a533f7f301347233744743a5ee375319eff73
|
import matplotlib as mpl
mpl.use('Agg')
import numpy as np
import matplotlib.pyplot as py
host = ['LMC']
marker = ['o']  # other options: 'v', '^', '<', '>', 'D', '+', 'x', '*'
alpha_eff = np.dtype([('Period',np.float32),('observed_w',np.float32),('residual',np.float32),('error',np.float32),('alpha',np.float32),('name',np.str_,5)])
P,mw,re,er,HP,galaxy = np.loadtxt('../output/chains/effective_hyperparameters_cepheids.txt',unpack=True,usecols=[0,1,2,3,4,5],dtype=alpha_eff)
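# Colour key (used in both panels below): the marker face colour encodes each
# point's effective hyperparameter HP -- black for HP == 1, blue for
# 0.1 <= HP < 1, green for 0.01 <= HP < 0.1, red for 0.001 <= HP < 0.01 and
# yellow for HP < 0.001. In the top panel the error bars are inflated by a
# factor 1/sqrt(HP).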
fig = py.figure()
#fig.subplots_adjust(bottom=0.1, left=0.06, top=0.85, right=0.97,hspace=0.3)
py.subplot(2,1,1)
indexs = 0
indexf = 0
# Find the index range of the points belonging to the host (LMC)
for index in range(len(P)):
    if galaxy[index] == host[0]:
        indexf = index
for index in range(indexs, indexf + 1):
    # Marker face colour encodes the effective hyperparameter (see key above)
    if HP[index] == 1.:
        colour = 'k'
    elif 1. > HP[index] >= .1:
        colour = 'b'
    elif .1 > HP[index] >= 1.e-2:
        colour = 'g'
    elif 1.e-2 > HP[index] >= 1.e-3:
        colour = 'r'
    elif HP[index] < 1.e-3:
        colour = 'y'
    else:
        continue
    py.errorbar(P[index], mw[index], yerr=er[index] / np.sqrt(HP[index]), xerr=None,
                ecolor='k', marker=marker[0], mfc=colour, fmt='', ls='None', label=host[0])
#py.scatter(P,mw)
py.plot(P, mw - re, color='k')  # markersize='small' is not a valid numeric size; no markers are drawn here anyway
py.xscale('log')
py.xlim(1,2.e2)
py.title('LMC Cepheid variables')
#py.xlabel('Period [days]')
py.ylabel('W [mag]')
py.gca().invert_yaxis()
py.subplot(2,1,2)
indexs = 0
indexf = 0
for index in range(len(P)):
    if galaxy[index] == host[0]:
        indexf = index
for index in range(indexs, indexf + 1):
    # Same colour coding as above, now for the residuals
    if HP[index] == 1.:
        colour = 'k'
    elif 1. > HP[index] >= .1:
        colour = 'b'
    elif .1 > HP[index] >= 1.e-2:
        colour = 'g'
    elif 1.e-2 > HP[index] >= 1.e-3:
        colour = 'r'
    elif HP[index] < 1.e-3:
        colour = 'y'
    else:
        continue
    py.errorbar(P[index], re[index], yerr=er[index], xerr=None, ecolor='k',
                marker=marker[0], mfc=colour, fmt='', ls='None', label=host[0])
py.xscale('log')
py.yscale('linear')
py.hlines(0.,1.,2.e2,color='k',linestyles='dotted')
#py.ylim(5.e-3,1.e1)
py.xlim(1.e0,2.e2)
py.xlabel('Period [days]')
py.ylabel('W residual [mag]')
#py.legend(loc=0,numpoints=1,ncol=4)
py.savefig('../output/chains/effective_HP_cepheids.pdf')
exit()
|
wilmarcardonac/hypermcmc
|
analyzer/plot_HP_LMC.py
|
Python
|
gpl-2.0
| 3,241
|
[
"Galaxy"
] |
0b2197fee41e20d0708c0514b58bcaa5f85dfe720f95fbff4c9a3fff6597701d
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.analysis.run Contains the AnalysisRun class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from abc import ABCMeta, abstractproperty
from collections import OrderedDict
# Import the relevant PTS classes and modules
from ...core.tools import filesystem as fs
from ...core.simulation.skifile import SkiFile
from ..core.model import RTModel
from ...core.tools import sequences
from ...core.basics.composite import SimplePropertyComposite
from ..fitting.run import FittingRun
from ...core.tools.utils import lazyproperty
from ...core.tools.serialization import load_dict
from ...core.simulation.tree import DustGridTree
from ...core.simulation.grids import FileTreeDustGrid, load_grid
from ...core.simulation.wavelengthgrid import WavelengthGrid
from ..basics.projection import GalaxyProjection, EdgeOnProjection, FaceOnProjection
from ..basics.instruments import FullInstrument, SimpleInstrument, SEDInstrument
from ...magic.basics.coordinatesystem import CoordinateSystem
from ...core.basics.log import log
from ...core.remote.remote import load_remote
from ...core.basics.configuration import Configuration
from ...core.simulation.logfile import LogFile
from ...core.tools import strings
from ...core.extract.progress import ProgressTable
from ...core.extract.timeline import TimeLineTable
from ...core.extract.memory import MemoryUsageTable
from ...magic.core.dataset import DataSet
from ..core.environment import colours_name as colour_maps_name
from ..core.environment import ssfr_name as ssfr_maps_name
from ..core.environment import tir_name as tir_maps_name
from ..core.environment import attenuation_name as attenuation_maps_name
from ..core.environment import old_name as old_maps_name
from ..core.environment import young_name as young_maps_name
from ..core.environment import ionizing_name as ionizing_maps_name
from ..core.environment import dust_name as dust_maps_name
from ..core.environment import rgb_name as rgb_maps_name
from ...core.data.sed import ObservedSED, SED
from ...magic.core.datacube import DataCube
from ...core.simulation.tree import get_nleaves
from ..build.definition import ModelDefinition
from ..basics.properties import GalaxyProperties
from ...core.tools import tables
from ...core.filter.filter import parse_filter
from ...magic.core.frame import Frame
from ...core.basics.distribution import Distribution
from ...magic.region.list import SkyRegionList
# -----------------------------------------------------------------
wavelengths_filename = "wavelengths.txt"
dustgridtree_filename = "tree.dat"
# -----------------------------------------------------------------
# Set contribution names
total = "total"
bulge = "bulge"
disk = "disk"
old = "old"
young = "young"
ionizing = "ionizing"
unevolved = "unevolved"
extra = "extra"
# All contributions
contributions = [total, bulge, disk, old, young, ionizing, unevolved]
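# Note: the "extra" contribution is optional, so it is not included in the
# default list above (see extra_output_path below)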
# -----------------------------------------------------------------
def get_analysis_run_cwd(name):
"""
This function ...
:param name:
:return:
"""
return get_analysis_run(fs.cwd(), name)
# -----------------------------------------------------------------
def get_analysis_run(modeling_path, name):
"""
This function ...
:param modeling_path:
:param name:
:return:
"""
# Get runs object
runs = AnalysisRuns(modeling_path)
# Return analysis run
return runs.load(name)
# -----------------------------------------------------------------
def get_analysis_model_cwd(name):
"""
This function ...
:param name:
:return:
"""
return get_analysis_model(fs.cwd(), name)
# -----------------------------------------------------------------
def get_analysis_model(modeling_path, name):
"""
This function ...
    :param modeling_path:
    :param name:
    :return:
"""
# Get analysis run
run = get_analysis_run(modeling_path, name)
# Return model
return run.model
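# Example usage (hypothetical modeling path and run name):
#
#     run = get_analysis_run("/home/user/modeling/M81", "run_2017")
#     model = run.model   # equivalent to get_analysis_model(..., "run_2017")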
# -----------------------------------------------------------------
class AnalysisRunInfo(SimplePropertyComposite):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
The constructor ...
"""
# Call the constructor of the base class
super(AnalysisRunInfo, self).__init__()
# Define properties
self.add_string_property("name", "name of the analysis run")
self.add_string_property("path", "path of the analysis run")
self.add_string_property("fitting_run", "fitting run name")
self.add_string_property("generation_name", "generation name")
self.add_string_property("simulation_name", "simulation name")
self.add_string_property("model_name", "name of the model")
self.add_real_property("chi_squared", "chi squared value of the fitted model")
self.add_string_property("reference_deprojection", "name of the deprojection model that is used for the creating the instruments")
# Parameter values dictionary
self.add_section("parameter_values", "parameter values", dynamic=True)
# Set properties
self.set_properties(kwargs)
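# A minimal sketch (hypothetical values) of creating run info; the keyword
# arguments are stored through SimplePropertyComposite.set_properties:
#
#     info = AnalysisRunInfo(name="run_1", path="/modeling/analysis/run_1",
#                            model_name="best_model")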
# -----------------------------------------------------------------
# Various filenames
dust_grid_filename = "dust_grid.dg"
wavelength_grid_filename = "wavelength_grid.dat"
dust_grid_build_name = "dust grid"
info_filename = "info.dat"
config_filename = "config.cfg"
launch_config_filename = "launch_config.cfg"
input_filename = "input.dat"
dust_grid_tree_filename = "tree.dat"
# Directories
model_name = "model"
instruments_name = "instruments"
projections_name = "projections"
extract_name = "extr"
plot_name = "plot"
misc_name = "misc"
evaluation_name = "evaluation"
contributions_name = "contributions"
# Analysis directories
properties_name = "properties"
attenuation_name = "attenuation"
colours_name = "colours"
fluxes_name = "fluxes"
images_name = "images"
residuals_name = "residuals"
maps_name = "maps"
absorption_name = "absorption"
heating_name = "heating"
energy_name = "energy"
sfr_name = "sfr"
correlations_name = "correlations"
# Projection filenames
earth_projection_filename = "earth.proj"
faceon_projection_filename = "faceon.proj"
edgeon_projection_filename = "edgeon.proj"
# Instrument filenames
sed_earth_instrument_filename = "earth_sed.instr"
full_sed_earth_instrument_filename = "earth_full_sed.instr"
simple_earth_instrument_filename = "earth_simple.instr"
full_earth_instrument_filename = "earth_full.instr"
simple_faceon_instrument_filename = "faceon_simple.instr"
full_faceon_instrument_filename = "faceon_full.instr"
simple_edgeon_instrument_filename = "edgeon_simple.instr"
full_edgeon_instrument_filename = "edgeon_full.instr"
# -----------------------------------------------------------------
class AnalysisRunBase(object):
"""
This class ...
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
@property
def from_fitting(self):
return self.fitting_run_name is not None
# -----------------------------------------------------------------
@property
def from_model(self):
return self.fitting_run_name is None
# -----------------------------------------------------------------
    @property
    def from_generation(self):
        # From the best simulation of a certain generation (otherwise: from the initial guess)
        return self.from_fitting and self.generation_name is not None
    # -----------------------------------------------------------------
    @property
    def from_initial_guess(self):
        # From the initial guess (otherwise: from the best simulation of a certain generation)
        return self.from_fitting and self.generation_name is None
# -----------------------------------------------------------------
@property
def name(self):
return self.info.name
# -----------------------------------------------------------------
@property
def generation_name(self):
return self.info.generation_name
# -----------------------------------------------------------------
@property
def simulation_name(self):
return self.info.simulation_name
# -----------------------------------------------------------------
@property
def model_name(self):
return self.info.model_name
# -----------------------------------------------------------------
@property
def input_file_path(self):
return fs.join(self.path, input_filename)
# -----------------------------------------------------------------
@property
def ski_file_path(self):
return fs.join(self.path, self.galaxy_name + ".ski")
# -----------------------------------------------------------------
@property
def wavelength_grid_path(self):
# Set the path to the wavelength grid file
return fs.join(self.path, wavelength_grid_filename)
# -----------------------------------------------------------------
@property
def nwavelengths(self):
return len(self.wavelength_grid)
# -----------------------------------------------------------------
@property
def dust_grid_path(self):
# Set the path to the dust grid file
return fs.join(self.path, dust_grid_filename)
# -----------------------------------------------------------------
@property
def info_path(self):
# Set the path to the analysis run info file
return fs.join(self.path, info_filename)
# -----------------------------------------------------------------
@property
def config_path(self):
return fs.join(self.path, config_filename)
# -----------------------------------------------------------------
@property
def heating_config_path(self):
return fs.join(self.heating_path, config_filename)
# -----------------------------------------------------------------
@property
def launch_config_path(self):
return fs.join(self.path, launch_config_filename)
# -----------------------------------------------------------------
@property
def total_simulation_path(self):
return self.simulation_path_for_contribution(total)
# -----------------------------------------------------------------
@property
def total_output_path(self):
return self.output_path_for_contribution(total)
# -----------------------------------------------------------------
@property
def bulge_output_path(self):
return self.output_path_for_contribution(bulge)
# -----------------------------------------------------------------
@property
def disk_output_path(self):
return self.output_path_for_contribution(disk)
# -----------------------------------------------------------------
@property
def old_output_path(self):
return self.output_path_for_contribution(old)
# -----------------------------------------------------------------
@property
def young_output_path(self):
return self.output_path_for_contribution(young)
# -----------------------------------------------------------------
@property
def ionizing_output_path(self):
return self.output_path_for_contribution(ionizing)
# -----------------------------------------------------------------
@property
def unevolved_output_path(self):
return self.output_path_for_contribution(unevolved)
# -----------------------------------------------------------------
@property
def extra_output_path(self):
path = self.output_path_for_contribution(extra, create=False)
if not fs.is_directory(path): return None
else: return path
# -----------------------------------------------------------------
@property
def total_logfile_path(self):
return self.logfile_path_for_contribution(total)
# -----------------------------------------------------------------
@property
def total_extr_path(self):
return self.extraction_path_for_contribution(total)
# -----------------------------------------------------------------
@property
def total_extract_path(self):
return self.total_extr_path
# -----------------------------------------------------------------
@property
def progress_path(self):
return fs.join(self.total_extr_path, "progress.dat")
# -----------------------------------------------------------------
@property
def timeline_path(self):
return fs.join(self.total_extr_path, "timeline.dat")
# -----------------------------------------------------------------
@property
def memory_path(self):
return fs.join(self.total_extr_path, "memory.dat")
# -----------------------------------------------------------------
@property
def total_plot_path(self):
return fs.create_directory_in(self.total_simulation_path, plot_name)
# -----------------------------------------------------------------
@property
def total_misc_path(self):
return fs.create_directory_in(self.total_simulation_path, misc_name)
# -----------------------------------------------------------------
@property
def evaluation_path(self):
return fs.join(self.path, evaluation_name)
# -----------------------------------------------------------------
@property
def contributions_path(self):
return fs.join(self.path, contributions_name)
# -----------------------------------------------------------------
@property
def properties_path(self):
return fs.join(self.path, properties_name)
# -----------------------------------------------------------------
@property
def attenuation_path(self):
return fs.join(self.path, attenuation_name)
# -----------------------------------------------------------------
@property
def colours_path(self):
return fs.join(self.path, colours_name)
# -----------------------------------------------------------------
@property
def colours_observed_path(self):
return fs.join(self.colours_path, "observed")
# -----------------------------------------------------------------
@property
def colours_simulated_path(self):
return fs.join(self.colours_path, "simulated")
# -----------------------------------------------------------------
@property
def colours_residuals_path(self):
return fs.join(self.colours_path, "residuals")
# -----------------------------------------------------------------
@abstractproperty
def colour_names(self):
pass
# -----------------------------------------------------------------
@property
def fluxes_path(self):
return fs.join(self.path, fluxes_name)
# -----------------------------------------------------------------
@property
def images_path(self):
return fs.join(self.path, images_name)
# -----------------------------------------------------------------
@property
def residuals_path(self):
return fs.join(self.path, residuals_name)
# -----------------------------------------------------------------
@property
def maps_path(self):
return fs.join(self.path, maps_name)
# -----------------------------------------------------------------
@property
def colour_maps_path(self):
return fs.join(self.maps_path, colour_maps_name)
# -----------------------------------------------------------------
@property
def colour_maps_name(self):
return fs.name(self.colour_maps_path)
# -----------------------------------------------------------------
@property
def ssfr_maps_path(self):
return fs.join(self.maps_path, ssfr_maps_name)
# -----------------------------------------------------------------
@property
def ssfr_maps_name(self):
return fs.name(self.ssfr_maps_path)
# -----------------------------------------------------------------
@property
def tir_maps_path(self):
return fs.join(self.maps_path, tir_maps_name)
# -----------------------------------------------------------------
@property
def tir_maps_name(self):
return fs.name(self.tir_maps_path)
# -----------------------------------------------------------------
@property
def attenuation_maps_path(self):
return fs.join(self.maps_path, attenuation_maps_name)
# -----------------------------------------------------------------
@property
def attenuation_maps_name(self):
return fs.name(self.attenuation_maps_path)
# -----------------------------------------------------------------
@property
def old_maps_path(self):
return fs.join(self.maps_path, old_maps_name)
# -----------------------------------------------------------------
@property
def old_maps_name(self):
return fs.name(self.old_maps_path)
# -----------------------------------------------------------------
@property
def dust_maps_path(self):
return fs.join(self.maps_path, dust_maps_name)
# -----------------------------------------------------------------
@property
def dust_maps_name(self):
return fs.name(self.dust_maps_path)
# -----------------------------------------------------------------
@property
def young_maps_path(self):
return fs.join(self.maps_path, young_maps_name)
# -----------------------------------------------------------------
@property
def young_maps_name(self):
return fs.name(self.young_maps_path)
# -----------------------------------------------------------------
@property
def ionizing_maps_path(self):
return fs.join(self.maps_path, ionizing_maps_name)
# -----------------------------------------------------------------
@property
def ionizing_maps_name(self):
return fs.name(self.ionizing_maps_path)
# -----------------------------------------------------------------
@property
def rgb_maps_path(self):
return fs.join(self.maps_path, rgb_maps_name)
# -----------------------------------------------------------------
@property
def rgb_maps_name(self):
return fs.name(self.rgb_maps_path)
# -----------------------------------------------------------------
@property
def absorption_path(self):
return fs.join(self.path, absorption_name)
# -----------------------------------------------------------------
@property
def heating_path(self):
return fs.join(self.path, heating_name)
# -----------------------------------------------------------------
@property
def energy_path(self):
return fs.join(self.path, energy_name)
# -----------------------------------------------------------------
@property
def sfr_path(self):
return fs.join(self.path, sfr_name)
# -----------------------------------------------------------------
@property
def correlations_path(self):
return fs.join(self.path, correlations_name)
# -----------------------------------------------------------------
@property
def dust_grid_build_path(self):
return fs.join(self.path, dust_grid_build_name)
# -----------------------------------------------------------------
@property
def dust_grid_simulation_out_path(self):
return fs.join(self.dust_grid_build_path, "out")
# -----------------------------------------------------------------
def simulation_path_for_contribution(self, contribution, create=True):
"""
This function ...
:param contribution:
:param create:
:return:
"""
if create: return fs.create_directory_in(self.contributions_path, contribution)
else: return fs.join(self.contributions_path, contribution)
# -----------------------------------------------------------------
def ski_path_for_contribution(self, contribution):
"""
This function ...
:param contribution:
:return:
"""
return fs.join(self.simulation_path_for_contribution(contribution), self.galaxy_name + ".ski")
# -----------------------------------------------------------------
def output_path_for_contribution(self, contribution, create=True):
"""
This function ...
:param contribution:
:param create:
:return:
"""
if create: return fs.create_directory_in(self.simulation_path_for_contribution(contribution, create=create), "out")
else: return fs.join(self.simulation_path_for_contribution(contribution, create=create), "out")
# -----------------------------------------------------------------
def logfile_path_for_contribution(self, contribution):
"""
Thisf unction ...
:return:
"""
return fs.join(self.output_path_for_contribution(contribution), self.galaxy_name + "_log.txt")
# -----------------------------------------------------------------
def extraction_path_for_contribution(self, contribution):
"""
This function ...
:param contribution:
:return:
"""
return fs.join(self.contributions_path, contribution, extract_name)
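    # Directory layout for one contribution (e.g. "total"), as implied by the
    # helpers above:
    #
    #     <run path>/contributions/total              simulation directory
    #     <run path>/contributions/total/<galaxy>.ski ski file
    #     <run path>/contributions/total/out          simulation output
    #     <run path>/contributions/total/extr         extracted data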
# -----------------------------------------------------------------
@property
def analysis_run_name(self):
return self.info.name
# -----------------------------------------------------------------
@property
def dust_grid_simulation_logfile_path(self):
"""
This function ...
:return:
"""
# Determine the output path of the dust grid simulation
out_path = self.dust_grid_simulation_out_path
# Determine the log file path
logfile_path = fs.join(out_path, "dustgrid_log.txt")
# Return the log file path
return logfile_path
# -----------------------------------------------------------------
@abstractproperty
def has_dust_grid_simulation_logfile(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def dust_grid_simulation_logfile(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def dust_grid_tree(self):
pass
# -----------------------------------------------------------------
@lazyproperty
def ncells(self):
"""
This function ...
:return:
"""
# Read the log file
if self.has_dust_grid_simulation_logfile:
# Debugging
log.debug("Determining the number of dust cells by reading the dust grid simulation's log file ...")
# Load the log file and get the number of dust cells
return self.dust_grid_simulation_logfile.dust_cells
# Log file cannot be found
else:
# Debugging
log.debug("Determining the number of dust cells by reading the dust cell tree data file (this can take a while) ...")
# Get the number of leave nodes
#return self.dust_grid_tree.nleaves # requires loading the entire tree file!
return get_nleaves(self.dust_grid_tree_path)
# -----------------------------------------------------------------
def get_remote_script_input_paths_for_host(self, host_id):
"""
This function ...
:param host_id:
:return:
"""
paths = []
# Loop over the commands
for command in self.get_remote_script_commands_for_host(host_id):
input_path = command.split("-i ")[1]
if strings.is_quote_character(input_path[0]): input_path = input_path[1:].split(input_path[0])[0]
else: input_path = input_path.split(" ")[0]
paths.append(input_path)
# Return the list of paths
return paths
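    # For example (illustrative), a remote script command such as
    #     skirt -i "/data/input dir" -o /data/out galaxy.ski
    # yields the input path '/data/input dir': a quoted argument is read up to
    # the closing quote, an unquoted one up to the next space.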
# -----------------------------------------------------------------
@abstractproperty
def has_maps_young(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def nyoung_maps(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def has_maps_tir(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def ntir_maps(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def has_maps_ssfr(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def nssfr_maps(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def has_maps_old(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def nold_maps(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def has_maps_ionizing(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def nionizing_maps(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def has_maps_dust(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def ndust_maps(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def has_maps_colours(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def ncolour_maps(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def has_maps_attenuation(self):
pass
# -----------------------------------------------------------------
@abstractproperty
def nattenuation_maps(self):
pass
# -----------------------------------------------------------------
@property
def has_maps(self):
return self.has_maps_attenuation or self.has_maps_colours or self.has_maps_dust or self.has_maps_ionizing or self.has_maps_old or self.has_maps_ssfr or self.has_maps_tir or self.has_maps_young
# -----------------------------------------------------------------
@abstractproperty
def has_heating(self):
pass
# -----------------------------------------------------------------
@property
def simulated_sed_path(self):
return fs.join(self.total_output_path, self.simulation_prefix + "_earth_sed.dat")
# -----------------------------------------------------------------
@abstractproperty
def has_simulated_sed(self):
pass
# -----------------------------------------------------------------
@property
def simulated_fluxes_path(self):
return fs.join(self.total_misc_path, self.simulation_prefix + "_earth_fluxes.dat")
# -----------------------------------------------------------------
@abstractproperty
def has_simulated_fluxes(self):
pass
# -----------------------------------------------------------------
@property
def simulation_prefix(self):
return self.galaxy_name
# -----------------------------------------------------------------
@property
def simulated_datacube_path(self):
return fs.join(self.total_output_path, self.galaxy_name + "_earth_total.fits")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_fluxes_path(self):
return fs.create_directory_in(self.evaluation_path, "fluxes")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_images_path(self):
return fs.create_directory_in(self.evaluation_path, "images")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_image_names(self):
return fs.files_in_path(self.evaluation_images_path, returns="name", extension="fits", not_contains="error")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_image_filters(self):
return [parse_filter(name) for name in self.evaluation_image_names]
# -----------------------------------------------------------------
def has_evaluation_image_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return fltr in self.evaluation_image_filters
# -----------------------------------------------------------------
def get_evaluation_image_path_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return fs.join(self.evaluation_images_path, str(fltr) + ".fits")
# -----------------------------------------------------------------
def get_evaluation_image_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return Frame.from_file(self.get_evaluation_image_path_for_filter(fltr))
# -----------------------------------------------------------------
@lazyproperty
def evaluation_proper_images_path(self):
return fs.create_directory_in(self.evaluation_path, "proper_images")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_proper_image_names(self):
return fs.files_in_path(self.evaluation_proper_images_path, returns="name", extension="fits")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_proper_image_filters(self):
return [parse_filter(name) for name in self.evaluation_proper_image_names]
# -----------------------------------------------------------------
def has_evaluation_proper_image_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return fltr in self.evaluation_proper_image_filters
# -----------------------------------------------------------------
def get_evaluation_proper_image_path_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return fs.join(self.evaluation_proper_images_path, str(fltr) + ".fits")
# -----------------------------------------------------------------
def get_evaluation_proper_image_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return Frame.from_file(self.get_evaluation_proper_image_path_for_filter(fltr))
# -----------------------------------------------------------------
@lazyproperty
def evaluation_residuals_path(self):
return fs.create_directory_in(self.evaluation_path, "residuals")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_residuals_names(self):
return fs.files_in_path(self.evaluation_residuals_path, returns="name", extension="fits")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_residuals_filters(self):
return [parse_filter(name) for name in self.evaluation_residuals_names]
# -----------------------------------------------------------------
def has_evaluation_residuals_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return fltr in self.evaluation_residuals_filters
# -----------------------------------------------------------------
def get_evaluation_residuals_path_for_filter(self, fltr):
"""
        This function ...
:param fltr:
:return:
"""
return fs.join(self.evaluation_residuals_path, str(fltr) + ".fits")
# -----------------------------------------------------------------
def get_evaluation_residuals_for_filter(self, fltr):
"""
        This function ...
:param fltr:
:return:
"""
return Frame.from_file(self.get_evaluation_residuals_path_for_filter(fltr))
# -----------------------------------------------------------------
@lazyproperty
def evaluation_proper_residuals_path(self):
return fs.create_directory_in(self.evaluation_path, "proper_residuals")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_proper_residuals_names(self):
return fs.files_in_path(self.evaluation_proper_residuals_path, returns="name", extension="fits")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_proper_residuals_filters(self):
return [parse_filter(name) for name in self.evaluation_proper_residuals_names]
# -----------------------------------------------------------------
def has_evaluation_proper_residuals_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return fltr in self.evaluation_proper_residuals_filters
# -----------------------------------------------------------------
def get_evaluation_proper_residuals_path_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return fs.join(self.evaluation_proper_residuals_path, str(fltr) + ".fits")
# -----------------------------------------------------------------
def get_evaluation_proper_residuals_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return Frame.from_file(self.get_evaluation_proper_residuals_path_for_filter(fltr))
# -----------------------------------------------------------------
@lazyproperty
def evaluation_residuals_distribution_names(self):
        return fs.files_in_path(self.evaluation_residuals_path, returns="name", extension="dat")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_residuals_distributions_filters(self):
return [parse_filter(name.split("_distribution")[0]) for name in self.evaluation_residuals_distribution_names]
# -----------------------------------------------------------------
def has_evaluation_residuals_distribution_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return fltr in self.evaluation_residuals_distributions_filters
# -----------------------------------------------------------------
def get_evaluation_residuals_distribution_path_for_filter(self, fltr):
"""
        This function ...
:param fltr:
:return:
"""
        return fs.join(self.evaluation_residuals_path, str(fltr) + "_distribution.dat")
# -----------------------------------------------------------------
def get_evaluation_residuals_distribution_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return Distribution.from_file(self.get_evaluation_residuals_distribution_path_for_filter(fltr))
# -----------------------------------------------------------------
@lazyproperty
def evaluation_proper_residuals_distribution_names(self):
return fs.files_in_path(self.evaluation_proper_residuals_path, returns="name", extension="dat")
# -----------------------------------------------------------------
@lazyproperty
def evaluation_proper_residuals_distribution_filters(self):
return [parse_filter(name.split("_distribution")[0]) for name in self.evaluation_proper_residuals_distribution_names]
# -----------------------------------------------------------------
def has_evaluation_proper_residuals_distribution_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return fltr in self.evaluation_proper_residuals_distribution_filters
# -----------------------------------------------------------------
def get_evaluation_proper_residuals_distribution_path_for_filter(self, fltr):
"""
        This function ...
:param fltr:
:return:
"""
return fs.join(self.evaluation_proper_residuals_path, str(fltr) + "_distribution.dat")
# -----------------------------------------------------------------
def get_evaluation_proper_residuals_distribution_for_filter(self, fltr):
"""
This function ...
:param fltr:
:return:
"""
return Distribution.from_file(self.get_evaluation_proper_residuals_distribution_path_for_filter(fltr))
# -----------------------------------------------------------------
class AnalysisRun(AnalysisRunBase):
"""
This class ...
"""
def __init__(self, galaxy_name=None, info=None, hubble_stage=None):
"""
The constructor ...
:param galaxy_name:
:param info:
:param hubble_stage:
"""
# Set the analysis run info
self.info = info
# Set galaxy properties
self.galaxy_name = galaxy_name
self.hubble_stage = hubble_stage
## Create directories
# The directory for the model
if not fs.is_directory(self.model_path): fs.create_directory(self.model_path)
# The directory for the projections and the instruments
if not fs.is_directory(self.projections_path): fs.create_directory(self.projections_path)
if not fs.is_directory(self.instruments_path): fs.create_directory(self.instruments_path)
# The directory for the dust grid output
if not fs.is_directory(self.dust_grid_build_path): fs.create_directory(self.dust_grid_build_path)
if not fs.is_directory(self.dust_grid_simulation_out_path): fs.create_directory(self.dust_grid_simulation_out_path)
# Contributions directory
if not fs.is_directory(self.contributions_path): fs.create_directory(self.contributions_path)
# Evaluation
if not fs.is_directory(self.evaluation_path): fs.create_directory(self.evaluation_path)
# Analysis directories
if not fs.is_directory(self.fluxes_path): fs.create_directory(self.fluxes_path)
if not fs.is_directory(self.images_path): fs.create_directory(self.images_path)
if not fs.is_directory(self.residuals_path): fs.create_directory(self.residuals_path)
if not fs.is_directory(self.properties_path): fs.create_directory(self.properties_path)
if not fs.is_directory(self.attenuation_path): fs.create_directory(self.attenuation_path)
if not fs.is_directory(self.colours_path): fs.create_directory(self.colours_path)
if not fs.is_directory(self.maps_path): fs.create_directory(self.maps_path)
if not fs.is_directory(self.absorption_path): fs.create_directory(self.absorption_path)
if not fs.is_directory(self.heating_path): fs.create_directory(self.heating_path)
if not fs.is_directory(self.energy_path): fs.create_directory(self.energy_path)
if not fs.is_directory(self.sfr_path): fs.create_directory(self.sfr_path)
if not fs.is_directory(self.correlations_path): fs.create_directory(self.correlations_path)
# Maps subdirectories
if not fs.is_directory(self.colour_maps_path): fs.create_directory(self.colour_maps_path)
if not fs.is_directory(self.ssfr_maps_path): fs.create_directory(self.ssfr_maps_path)
if not fs.is_directory(self.tir_maps_path): fs.create_directory(self.tir_maps_path)
if not fs.is_directory(self.attenuation_maps_path): fs.create_directory(self.attenuation_maps_path)
if not fs.is_directory(self.old_maps_path): fs.create_directory(self.old_maps_path)
if not fs.is_directory(self.dust_maps_path): fs.create_directory(self.dust_maps_path)
if not fs.is_directory(self.young_maps_path): fs.create_directory(self.young_maps_path)
if not fs.is_directory(self.ionizing_maps_path): fs.create_directory(self.ionizing_maps_path)
if not fs.is_directory(self.rgb_maps_path): fs.create_directory(self.rgb_maps_path)
# -----------------------------------------------------------------
@classmethod
def from_name(cls, modeling_path, name, hubble_stage=None):
"""
This function ...
:param modeling_path:
:param name:
:param hubble_stage:
:return:
"""
analysis_path = fs.join(modeling_path, "analysis")
run_path = fs.join(analysis_path, name)
return cls.from_path(run_path, hubble_stage=hubble_stage)
# -----------------------------------------------------------------
@classmethod
def from_path(cls, path, hubble_stage=None):
"""
This function ...
:param path:
:param hubble_stage:
:return:
"""
# Determine the info path
info_path = fs.join(path, info_filename)
if not fs.is_file(info_path): raise IOError("Could not find the info file")
else: return cls.from_info(info_path, hubble_stage=hubble_stage)
# -----------------------------------------------------------------
@classmethod
def from_info(cls, info_path, hubble_stage=None):
"""
This function ...
:param info_path:
:param hubble_stage:
:return:
"""
# Load the analysis run info
info = AnalysisRunInfo.from_file(info_path)
# Create the instance
run = cls(info=info, hubble_stage=hubble_stage)
# Set galaxy name
modeling_path = fs.directory_of(fs.directory_of(run.info.path))
run.galaxy_name = fs.name(modeling_path)
# Return the analysis run object
return run
# -----------------------------------------------------------------
@property
def has_dust_grid_simulation_logfile(self):
return fs.is_file(self.dust_grid_simulation_logfile_path)
# -----------------------------------------------------------------
@lazyproperty
def dust_grid_simulation_logfile(self):
return LogFile.from_file(self.dust_grid_simulation_logfile_path)
# -----------------------------------------------------------------
@property
def has_output(self):
return fs.has_files_in_path(self.output_path)
# -----------------------------------------------------------------
@property
def has_logfile(self):
return fs.is_file(self.logfile_path)
# -----------------------------------------------------------------
@lazyproperty
def logfile(self):
return LogFile.from_file(self.total_logfile_path)
# -----------------------------------------------------------------
@property
def has_misc(self):
return fs.has_files_in_path(self.total_misc_path)
# -----------------------------------------------------------------
@property
def has_extracted(self):
return fs.has_files_in_path(self.total_extr_path)
# -----------------------------------------------------------------
@property
def has_progress(self):
return fs.is_file(self.progress_path)
# -----------------------------------------------------------------
@property
def has_timeline(self):
return fs.is_file(self.timeline_path)
# -----------------------------------------------------------------
@property
def has_memory(self):
return fs.is_file(self.memory_path)
# -----------------------------------------------------------------
@lazyproperty
def progress(self):
return ProgressTable.from_file(self.progress_path)
# -----------------------------------------------------------------
@lazyproperty
def timeline(self):
return TimeLineTable.from_file(self.timeline_path)
# -----------------------------------------------------------------
@lazyproperty
def memory(self):
return MemoryUsageTable.from_file(self.memory_path)
# -----------------------------------------------------------------
@property
def has_plots(self):
return fs.has_files_in_path(self.plot_path)
# -----------------------------------------------------------------
@property
def has_attenuation(self):
return fs.is_directory(self.attenuation_path) and not fs.is_empty(self.attenuation_path)
# -----------------------------------------------------------------
@property
def has_colours(self):
return fs.is_directory(self.colours_path) and not fs.is_empty(self.colours_path)
# -----------------------------------------------------------------
@property
def colour_names(self):
return fs.files_in_path(self.colours_simulated_path, extension="fits", returns="name")
# -----------------------------------------------------------------
@property
def has_residuals(self):
return fs.is_directory(self.residuals_path) and not fs.is_empty(self.residuals_path)
# -----------------------------------------------------------------
@property
def residual_image_names(self):
return fs.files_in_path(self.residuals_path, extension="fits", not_contains=["significance"], returns="name")
# -----------------------------------------------------------------
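    # The map-counting properties below count FITS files placed directly in the
    # map directory or, failing that, one subdirectory level deep.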
@property
def has_maps_attenuation(self):
return fs.is_directory(self.attenuation_maps_path) and not fs.is_empty(self.attenuation_maps_path)
# -----------------------------------------------------------------
@property
def nattenuation_maps(self):
if fs.has_files_in_path(self.attenuation_maps_path, extension="fits"): return fs.nfiles_in_path(self.attenuation_maps_path, extension="fits")
else: return fs.nfiles_in_path(self.attenuation_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_colours(self):
return fs.is_directory(self.colour_maps_path) and not fs.is_empty(self.colour_maps_path)
# -----------------------------------------------------------------
@property
def ncolour_maps(self):
if fs.has_files_in_path(self.colour_maps_path, extension="fits"): return fs.nfiles_in_path(self.colour_maps_path, extension="fits")
else: return fs.nfiles_in_path(self.colour_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_dust(self):
return fs.is_directory(self.dust_maps_path) and not fs.is_empty(self.dust_maps_path)
# -----------------------------------------------------------------
@property
def ndust_maps(self):
if fs.has_files_in_path(self.dust_maps_path, extension="fits"): return fs.nfiles_in_path(self.dust_maps_path, extension="fits")
else: return fs.nfiles_in_path(self.dust_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_ionizing(self):
return fs.is_directory(self.ionizing_maps_path) and not fs.is_empty(self.ionizing_maps_path)
# -----------------------------------------------------------------
@property
def nionizing_maps(self):
if fs.has_files_in_path(self.ionizing_maps_path, extension="fits"): return fs.nfiles_in_path(self.ionizing_maps_path, extension="fits")
else: return fs.nfiles_in_path(self.ionizing_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_rgb(self):
return fs.is_directory(self.rgb_maps_path) and not fs.is_empty(self.rgb_maps_path)
# -----------------------------------------------------------------
@property
def has_maps_old(self):
return fs.is_directory(self.old_maps_path) and not fs.is_empty(self.old_maps_path)
# -----------------------------------------------------------------
@property
def nold_maps(self):
if fs.has_files_in_path(self.old_maps_path, extension="fits"): return fs.nfiles_in_path(self.old_maps_path, extension="fits")
else: return fs.nfiles_in_path(self.old_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_ssfr(self):
return fs.is_directory(self.ssfr_maps_path) and not fs.is_empty(self.ssfr_maps_path)
# -----------------------------------------------------------------
@property
def nssfr_maps(self):
if fs.has_files_in_path(self.ssfr_maps_path, extension="fits"): return fs.nfiles_in_path(self.ssfr_maps_path, extension="fits")
else: return fs.nfiles_in_path(self.ssfr_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_tir(self):
return fs.is_directory(self.tir_maps_path) and not fs.is_empty(self.tir_maps_path)
# -----------------------------------------------------------------
@property
def ntir_maps(self):
if fs.has_files_in_path(self.tir_maps_path, extension="fits"): return fs.nfiles_in_path(self.tir_maps_path, extension="fits")
else: return fs.nfiles_in_path(self.tir_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_young(self):
return fs.is_directory(self.young_maps_path) and not fs.is_empty(self.young_maps_path)
# -----------------------------------------------------------------
@property
def nyoung_maps(self):
if fs.has_files_in_path(self.young_maps_path, extension="fits"): return fs.nfiles_in_path(self.young_maps_path, extension="fits")
else: return fs.nfiles_in_path(self.young_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_heating(self):
return fs.is_file(self.heating_config_path)
# -----------------------------------------------------------------
@lazyproperty
def nfiles(self):
return fs.nfiles_in_path(self.path, recursive=True)
# -----------------------------------------------------------------
@lazyproperty
def disk_space(self):
return fs.directory_size(self.path)
# -----------------------------------------------------------------
@property
def analysis_path(self):
return fs.directory_of(self.path)
# -----------------------------------------------------------------
@property
def modeling_path(self):
return fs.directory_of(self.analysis_path)
# -----------------------------------------------------------------
@property
def path(self):
return self.info.path
# -----------------------------------------------------------------
@lazyproperty
def config(self):
return Configuration.from_file(self.config_path)
# -----------------------------------------------------------------
@lazyproperty
def heating_config(self):
return Configuration.from_file(self.heating_config_path)
# -----------------------------------------------------------------
@lazyproperty
def wavelength_grid(self):
return WavelengthGrid.from_skirt_input(self.wavelength_grid_path)
# -----------------------------------------------------------------
@lazyproperty
def dust_grid(self):
return load_grid(self.dust_grid_path)
# -----------------------------------------------------------------
@property
def analysis_run_path(self):
return self.info.path
# -----------------------------------------------------------------
@property
def ski_file(self):
return SkiFile(self.ski_file_path)
# -----------------------------------------------------------------
@property
def input_paths(self):
return load_dict(self.input_file_path)
# -----------------------------------------------------------------
@property
def dust_grid_tree_path(self):
return fs.join(self.dust_grid_build_path, dust_grid_tree_filename)
# -----------------------------------------------------------------
@lazyproperty
def dust_grid_tree(self):
"""
This function ...
:return:
"""
# Give debug message
log.debug("Loading the dust grid tree, this may take a while (depending on the number of nodes) ...")
# Return the tree
return DustGridTree.from_file(self.dust_grid_tree_path)
# -----------------------------------------------------------------
def create_file_tree_dust_grid(self, search_method="Neighbor", write=False):
"""
This function ...
:param search_method:
:param write:
:return:
"""
grid = FileTreeDustGrid(filename=self.dust_grid_tree_path, search_method=search_method, write=write)
return grid
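    # Example (illustrative): wrap the pre-computed tree data file in a
    # file-tree dust grid without loading the whole tree into memory:
    #
    #     grid = run.create_file_tree_dust_grid(search_method="Neighbor")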
# -----------------------------------------------------------------
@lazyproperty
def has_dust_grid_tree(self):
return fs.is_file(self.dust_grid_tree_path)
# -----------------------------------------------------------------
@property
def model_path(self):
return fs.join(self.path, model_name)
# -----------------------------------------------------------------
@property
def instruments_path(self):
return fs.join(self.path, instruments_name)
# -----------------------------------------------------------------
@property
def projections_path(self):
return fs.join(self.path, projections_name)
# -----------------------------------------------------------------
@property
def earth_projection_path(self):
return fs.join(self.projections_path, earth_projection_filename)
# -----------------------------------------------------------------
@property
def faceon_projection_path(self):
return fs.join(self.projections_path, faceon_projection_filename)
# -----------------------------------------------------------------
@property
def edgeon_projection_path(self):
return fs.join(self.projections_path, edgeon_projection_filename)
# -----------------------------------------------------------------
@lazyproperty
def earth_projection(self):
return GalaxyProjection.from_file(self.earth_projection_path)
# -----------------------------------------------------------------
@lazyproperty
def edgeon_projection(self):
return EdgeOnProjection.from_file(self.edgeon_projection_path)
# -----------------------------------------------------------------
@lazyproperty
def faceon_projection(self):
return FaceOnProjection.from_file(self.faceon_projection_path)
# -----------------------------------------------------------------
@property
def sed_earth_instrument_path(self):
return fs.join(self.instruments_path, sed_earth_instrument_filename)
# -----------------------------------------------------------------
@property
def has_sed_earth_instrument(self):
return fs.is_file(self.sed_earth_instrument_path)
# -----------------------------------------------------------------
@property
def full_sed_earth_instrument_path(self):
return fs.join(self.instruments_path, full_sed_earth_instrument_filename)
# -----------------------------------------------------------------
@property
def has_full_sed_earth_instrument(self):
return fs.is_file(self.full_sed_earth_instrument_path)
# -----------------------------------------------------------------
@property
def simple_earth_instrument_path(self):
return fs.join(self.instruments_path, simple_earth_instrument_filename)
# -----------------------------------------------------------------
@property
def has_simple_earth_instrument(self):
return fs.is_file(self.simple_earth_instrument_path)
# -----------------------------------------------------------------
@property
def full_earth_instrument_path(self):
return fs.join(self.instruments_path, full_earth_instrument_filename)
# -----------------------------------------------------------------
@property
def has_full_earth_instrument(self):
return fs.is_file(self.full_earth_instrument_path)
# -----------------------------------------------------------------
@property
def simple_faceon_instrument_path(self):
return fs.join(self.instruments_path, simple_faceon_instrument_filename)
# -----------------------------------------------------------------
@property
def has_simple_faceon_instrument(self):
return fs.is_file(self.simple_faceon_instrument_path)
# -----------------------------------------------------------------
@property
def full_faceon_instrument_path(self):
return fs.join(self.instruments_path, full_faceon_instrument_filename)
# -----------------------------------------------------------------
@property
def has_full_faceon_instrument(self):
return fs.is_file(self.full_faceon_instrument_path)
# -----------------------------------------------------------------
@property
def simple_edgeon_instrument_path(self):
return fs.join(self.instruments_path, simple_edgeon_instrument_filename)
# -----------------------------------------------------------------
@property
def has_simple_edgeon_instrument(self):
return fs.is_file(self.simple_edgeon_instrument_path)
# -----------------------------------------------------------------
@property
def full_edgeon_instrument_path(self):
return fs.join(self.instruments_path, full_edgeon_instrument_filename)
# -----------------------------------------------------------------
@property
def has_full_edgeon_instrument(self):
return fs.is_file(self.full_edgeon_instrument_path)
# -----------------------------------------------------------------
@lazyproperty
def sed_earth_instrument(self):
"""
This function ...
:return:
"""
if not self.has_sed_earth_instrument:
instrument = SEDInstrument.from_projection(self.earth_projection)
instrument.saveto(self.sed_earth_instrument_path)
return SEDInstrument.from_file(self.sed_earth_instrument_path)
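    # This property and the instrument properties below follow a create-once
    # pattern: if the instrument file does not exist yet, it is created from the
    # corresponding projection and saved; afterwards it is loaded from file.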
# -----------------------------------------------------------------
@lazyproperty
def simple_earth_instrument(self):
"""
This function ...
:return:
"""
if not self.has_simple_earth_instrument:
instrument = SimpleInstrument.from_projection(self.earth_projection)
            instrument.saveto(self.simple_earth_instrument_path)
return SimpleInstrument.from_file(self.simple_earth_instrument_path)
# -----------------------------------------------------------------
@lazyproperty
def full_earth_instrument(self):
"""
This function ...
:return:
"""
if not self.has_full_earth_instrument:
instrument = FullInstrument.from_projection(self.earth_projection)
instrument.saveto(self.full_earth_instrument_path)
return FullInstrument.from_file(self.full_earth_instrument_path)
# -----------------------------------------------------------------
@lazyproperty
def simple_faceon_instrument(self):
"""
Create the simple instrument for the face-on projection if needed, and load it from file.
:return:
"""
if not self.has_simple_faceon_instrument:
instrument = SimpleInstrument.from_projection(self.faceon_projection)
instrument.saveto(self.simple_faceon_instrument_path)
return SimpleInstrument.from_file(self.simple_faceon_instrument_path)
# -----------------------------------------------------------------
@lazyproperty
def full_faceon_instrument(self):
"""
Create the full instrument for the face-on projection if needed, and load it from file.
:return:
"""
if not self.has_full_faceon_instrument:
instrument = FullInstrument.from_projection(self.faceon_projection)
instrument.saveto(self.full_faceon_instrument_path)
return FullInstrument.from_file(self.full_faceon_instrument_path)
# -----------------------------------------------------------------
@lazyproperty
def simple_edgeon_instrument(self):
"""
Create the simple instrument for the edge-on projection if needed, and load it from file.
:return:
"""
if not self.has_simple_edgeon_instrument:
instrument = SimpleInstrument.from_projection(self.edgeon_projection)
instrument.saveto(self.simple_edgeon_instrument_path)
return SimpleInstrument.from_file(self.simple_edgeon_instrument_path)
# -----------------------------------------------------------------
@lazyproperty
def full_edgeon_instrument(self):
"""
Create the full instrument for the edge-on projection if needed, and load it from file.
:return:
"""
if not self.has_full_edgeon_instrument:
instrument = FullInstrument.from_projection(self.edgeon_projection)
instrument.saveto(self.full_edgeon_instrument_path)
return FullInstrument.from_file(self.full_edgeon_instrument_path)
# -----------------------------------------------------------------
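# All of the instrument properties above follow the same create-or-load caching
# pattern: build the object from a projection on first access, write it to disk,
# then always load it back from the cached file. A minimal sketch of the pattern
# (the 'Thing' class and 'thing_path' are hypothetical, for illustration only):
#
#   @lazyproperty
#   def thing(self):
#       if not fs.is_file(self.thing_path):
#           thing = Thing.from_projection(self.projection)  # expensive construction
#           thing.saveto(self.thing_path)                   # cache on disk
#       return Thing.from_file(self.thing_path)             # load from the cached file
# -----------------------------------------------------------------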
@lazyproperty
def galaxy_properties_path(self):
from ..core.environment import properties_name, data_name
return fs.join(self.modeling_path, data_name, properties_name)
# -----------------------------------------------------------------
@lazyproperty
def galaxy_properties(self):
return GalaxyProperties.from_file(self.galaxy_properties_path)
# -----------------------------------------------------------------
@property
def galaxy_distance(self):
return self.galaxy_properties.distance
# -----------------------------------------------------------------
@property
def galaxy_center(self):
return self.galaxy_properties.center
# -----------------------------------------------------------------
@lazyproperty
def truncation_path(self):
return fs.join(self.modeling_path, "truncated")
# -----------------------------------------------------------------
@lazyproperty
def truncation_ellipse_path(self):
return fs.join(self.truncation_path, "ellipse.reg")
# -----------------------------------------------------------------
@lazyproperty
def truncation_ellipse(self):
return SkyRegionList.from_file(self.truncation_ellipse_path)[0]
# -----------------------------------------------------------------
@lazyproperty
def fitting_run_name(self):
return self.info.fitting_run
# -----------------------------------------------------------------
@lazyproperty
def fitting_run(self):
return FittingRun.from_name(self.modeling_path, self.fitting_run_name)
# -----------------------------------------------------------------
@lazyproperty
def model_suite(self):
from ..build.suite import ModelSuite
return ModelSuite.from_modeling_path(self.modeling_path)
# -----------------------------------------------------------------
@property
def model_definition_path(self):
return self.model_suite.get_model_path(self.model_name)
# -----------------------------------------------------------------
@property
def model_definition_stellar_path(self):
return fs.join(self.model_definition_path, "stellar")
# -----------------------------------------------------------------
@property
def model_definition_dust_path(self):
return fs.join(self.model_definition_path, "dust")
# -----------------------------------------------------------------
@lazyproperty
def model_stellar_path(self):
return fs.create_directory_in(self.model_path, "stellar")
# -----------------------------------------------------------------
@lazyproperty
def model_dust_path(self):
return fs.create_directory_in(self.model_path, "dust")
# -----------------------------------------------------------------
@lazyproperty
def stellar_component_paths(self):
# Return the paths as a dictionary based on component name
return fs.directories_in_path(self.model_stellar_path, returns="dict")
# -----------------------------------------------------------------
@property
def stellar_component_names(self):
return self.stellar_component_paths.keys()
# -----------------------------------------------------------------
@property
def nstellar_components(self):
return len(self.stellar_component_names)
# -----------------------------------------------------------------
@lazyproperty
def dust_component_paths(self):
# Return the paths as a dictionary based on component name
return fs.directories_in_path(self.model_dust_path, returns="dict")
# -----------------------------------------------------------------
@property
def dust_component_names(self):
return self.dust_component_paths.keys()
# -----------------------------------------------------------------
@property
def ndust_components(self):
return len(self.dust_component_names)
# -----------------------------------------------------------------
@property
def has_model(self):
"""
Whether this analysis run has a model: both stellar and dust components must be present.
:return:
"""
if self.nstellar_components > 0 and self.ndust_components > 0: return True
elif self.nstellar_components == 0 and self.ndust_components == 0: return False
else: raise RuntimeError("Inconsistent state of the model: there are stellar components but no dust components or vice versa")
# -----------------------------------------------------------------
@lazyproperty
def model_definition(self):
"""
This function ...
:return:
"""
from .initialization import create_model_definition_in_path
# Has the model definition been created yet?
if not self.has_model: return create_model_definition_in_path(self.model_name, self.model_path, self.model_definition_stellar_path, self.model_definition_dust_path, parameter_values=self.parameter_values)
# Create the model definition and return
else: return ModelDefinition(model_name, self.model_path, stellar_paths=self.stellar_component_paths, dust_paths=self.dust_component_paths)
# -----------------------------------------------------------------
@property
def model_old_map_name(self):
return self.model_suite.get_old_map_name_for_model(self.model_name)
# -----------------------------------------------------------------
@property
def model_young_map_name(self):
return self.model_suite.get_young_map_name_for_model(self.model_name)
# -----------------------------------------------------------------
@property
def model_ionizing_map_name(self):
return self.model_suite.get_ionizing_map_name_for_model(self.model_name)
# -----------------------------------------------------------------
@property
def model_dust_map_name(self):
return self.model_suite.get_dust_map_name_for_model(self.model_name)
# -----------------------------------------------------------------
@property
def model_old_map(self):
return self.model_suite.load_stellar_component_map(self.model_name, "old")
# -----------------------------------------------------------------
@property
def model_young_map(self):
return self.model_suite.load_stellar_component_map(self.model_name, "young")
# -----------------------------------------------------------------
@property
def model_ionizing_map(self):
return self.model_suite.load_stellar_component_map(self.model_name, "ionizing")
# -----------------------------------------------------------------
@property
def model_dust_map(self):
return self.model_suite.load_dust_component_map(self.model_name, "disk")
# -----------------------------------------------------------------
@lazyproperty
def generation_name(self):
return self.info.generation_name
# -----------------------------------------------------------------
@lazyproperty
def simulation_name(self):
return self.info.simulation_name
# -----------------------------------------------------------------
@lazyproperty
def parameter_values(self):
"""
Get the values of the labeled parameters from the ski file.
:return:
"""
# Get the ski file
ski = self.ski_file
# Get the values of all the labeled parameters
values = ski.get_labeled_values()
# Return the parameter values
return values
# -----------------------------------------------------------------
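# For example (illustrative values): if the ski file labels parameters such as
# 'dust_mass' and 'fuv_young', parameter_values returns a dictionary like
# {'dust_mass': <Quantity>, 'fuv_young': <Quantity>} keyed on the labels, and
# free_parameter_labels below is simply its set of keys.
# -----------------------------------------------------------------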
@property
def free_parameter_labels(self):
return self.parameter_values.keys()
# -----------------------------------------------------------------
@lazyproperty
def chi_squared(self):
return self.info.chi_squared
# -----------------------------------------------------------------
@lazyproperty
def model(self):
"""
Create the RTModel object for this analysis run, with all observed output paths set.
:return:
"""
# Create the model and return it
return RTModel(self.model_definition, simulation_name=self.simulation_name, chi_squared=self.chi_squared,
free_parameter_labels=self.free_parameter_labels, wavelength_grid=self.wavelength_grid,
observed_total_output_path=self.total_output_path, observed_bulge_output_path=self.bulge_output_path,
observed_disk_output_path=self.disk_output_path, observed_old_output_path=self.old_output_path,
observed_young_output_path=self.young_output_path, observed_sfr_output_path=self.ionizing_output_path,
observed_unevolved_output_path=self.unevolved_output_path,
observed_extra_output_path=self.extra_output_path, center=self.galaxy_center,
galaxy_name=self.galaxy_name, hubble_stage=self.hubble_stage, earth_wcs=self.reference_wcs,
truncation_ellipse=self.truncation_ellipse)
# -----------------------------------------------------------------
@lazyproperty
def mappings(self):
return self.model.mappings
# -----------------------------------------------------------------
@lazyproperty
def normalized_mappings(self):
return self.model.normalized_mappings
# -----------------------------------------------------------------
@property
def uses_grid_resolution(self):
return self.info.reference_deprojection == "grid"
# -----------------------------------------------------------------
@lazyproperty
def reference_deprojection_component_name(self):
if self.uses_grid_resolution: return None
return self.info.reference_deprojection
# -----------------------------------------------------------------
@lazyproperty
def is_stellar_reference_deprojection(self):
if self.uses_grid_resolution: raise ValueError("This function shouldn't be called")
return self.reference_deprojection_component_name in self.model_suite.get_stellar_component_names(self.model_name)
# -----------------------------------------------------------------
@lazyproperty
def is_dust_reference_deprojection(self):
if self.uses_grid_resolution: raise ValueError("This function shouldn't be called")
return self.reference_deprojection_component_name in self.model_suite.get_dust_component_names(self.model_name)
# -----------------------------------------------------------------
@lazyproperty
def reference_deprojection_component(self):
if self.reference_deprojection_component_name is None: return None
else:
if self.is_stellar_reference_deprojection: return self.model_suite.load_stellar_component(self.model_name, self.reference_deprojection_component_name, add_map=False)
elif self.is_dust_reference_deprojection: return self.model_suite.load_dust_component(self.model_name, self.reference_deprojection_component_name, add_map=False)
else: raise ValueError("Reference deprojection component name '" + self.reference_deprojection_component_name + "' not recognized as either stellar or dust")
# -----------------------------------------------------------------
@lazyproperty
def reference_deprojection(self):
if self.reference_deprojection_component_name is None: return None
else:
if self.is_stellar_reference_deprojection: return self.model_suite.load_stellar_component_deprojection(self.model_name, self.reference_deprojection_component_name, add_map=False)
elif self.is_dust_reference_deprojection: return self.model_suite.load_dust_component_deprojection(self.model_name, self.reference_deprojection_component_name, add_map=False)
else: raise ValueError("Reference deprojection component name '" + self.reference_deprojection_component_name + "' not recognized as either stellar or dust")
# -----------------------------------------------------------------
@lazyproperty
def reference_map(self):
if self.reference_deprojection_component_name is None: return None
else:
if self.is_stellar_reference_deprojection: return self.model_suite.load_stellar_component_map(self.model_name, self.reference_deprojection_component_name)
elif self.is_dust_reference_deprojection: return self.model_suite.load_dust_component_map(self.model_name, self.reference_deprojection_component_name)
else: raise ValueError("Reference deprojection component name '" + self.reference_deprojection_component_name + "' not recognized as either stellar or dust")
# -----------------------------------------------------------------
@lazyproperty
def reference_map_path(self):
if self.reference_deprojection_component_name is None: return None
else:
if self.is_stellar_reference_deprojection: return self.model_suite.get_stellar_component_map_path(self.model_name, self.reference_deprojection_component_name)
elif self.is_dust_reference_deprojection: return self.model_suite.get_dust_component_map_path(self.model_name, self.reference_deprojection_component_name)
else: raise ValueError("Reference deprojection component name '" + self.reference_deprojection_component_name + "' not recognized as either stellar or dust")
# -----------------------------------------------------------------
@lazyproperty
def reference_wcs(self):
if self.reference_map_path is None: return None
else: return CoordinateSystem.from_file(self.reference_map_path)
# -----------------------------------------------------------------
@property
def remote_script_paths(self):
return fs.files_in_path(self.path, extension="sh")
# -----------------------------------------------------------------
def get_remote_script_commands(self):
"""
Parse the remote execution scripts (*.sh) in the run directory into commands per host ID.
:return:
"""
commands = dict()
# Loop over the script paths
for path in self.remote_script_paths:
# Get host ID
host_id = fs.strip_extension(fs.name(path))
lines = []
# Open the file
for line in fs.read_lines(path):
if line.startswith("#"): continue
if not line.strip(): continue
lines.append(line)
# Set the commands
commands[host_id] = lines
# Return the commands
return commands
# -----------------------------------------------------------------
def get_remote_script_commands_for_host(self, host_id):
"""
This function ...
:param host_id:
:return:
"""
commands = self.get_remote_script_commands()
if host_id in commands: return commands[host_id]
else: return []
# -----------------------------------------------------------------
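# Example (hypothetical script): a file 'hpc.sh' in the run directory containing
#
#   # submit the simulation
#   qsub simulation.sh
#
#   qstat
#
# would make get_remote_script_commands() return {'hpc': ['qsub simulation.sh', 'qstat']}:
# the host ID is the filename without its extension, and comment and blank lines are skipped.
# -----------------------------------------------------------------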
def get_ski_for_contribution(self, contribution):
"""
This function ...
:param contribution:
:return:
"""
return SkiFile(self.ski_path_for_contribution(contribution))
# -----------------------------------------------------------------
@property
def has_simulated_sed(self):
return fs.is_file(self.simulated_sed_path)
# -----------------------------------------------------------------
@lazyproperty
def simulated_sed(self):
return SED.from_skirt(self.simulated_sed_path)
# -----------------------------------------------------------------
@property
def has_simulated_fluxes(self):
return fs.is_file(self.simulated_fluxes_path)
# -----------------------------------------------------------------
@lazyproperty
def simulated_fluxes(self):
return ObservedSED.from_file(self.simulated_fluxes_path)
# -----------------------------------------------------------------
@lazyproperty
def simulated_datacube(self):
"""
This function ...
:return:
"""
# Load the datacube
datacube = DataCube.from_file(self.simulated_datacube_path, self.wavelength_grid)
# Set the wcs
datacube.wcs = self.reference_wcs
# Return the datacube
return datacube
# -----------------------------------------------------------------
@lazyproperty
def simulated_dataset(self):
"""
This function ...
:return:
"""
#get_name_function = lambda filename: filename.split("__")[1]
#return DataSet.from_directory(self.total_misc_path, get_name=get_name_function)
return DataSet.from_directory(self.images_path)
# -----------------------------------------------------------------
@lazyproperty
def simulated_frame_list(self):
return self.simulated_dataset.get_framelist(named=False) # on filter
# -----------------------------------------------------------------
@lazyproperty
def simulated_named_frame_list(self):
return self.simulated_dataset.get_framelist(named=True) # on name
# -----------------------------------------------------------------
def get_simulated_frame_for_filter(self, fltr):
"""
Return the simulated frame for the given filter.
:param fltr:
:return:
"""
# Return the simulated frame
return self.simulated_frame_list[fltr]
# -----------------------------------------------------------------
@lazyproperty
def maps_collection(self):
from ..maps.collection import MapsCollection
return MapsCollection.from_modeling_path(self.modeling_path, analysis_run_name=self.name)
# -----------------------------------------------------------------
@lazyproperty
def observation_maps_collection(self):
from ..maps.collection import MapsCollection
return MapsCollection.from_modeling_path(self.modeling_path)
# -----------------------------------------------------------------
@property
def colours_methods(self):
return self.observation_maps_collection.get_colours_methods(flatten=True)
# -----------------------------------------------------------------
@property
def colours_origins(self):
return self.observation_maps_collection.get_colours_origins(flatten=True)
# -----------------------------------------------------------------
@property
def ssfr_methods(self):
return self.observation_maps_collection.get_ssfr_methods(flatten=True)
# -----------------------------------------------------------------
@property
def ssfr_origins(self):
return self.observation_maps_collection.get_ssfr_origins(flatten=True)
# -----------------------------------------------------------------
@property
def tir_methods(self):
return self.observation_maps_collection.get_tir_methods(flatten=True)
# -----------------------------------------------------------------
@property
def tir_origins(self):
return self.observation_maps_collection.get_tir_origins(flatten=True)
# -----------------------------------------------------------------
@property
def attenuation_methods(self):
return self.observation_maps_collection.get_attenuation_methods(flatten=True)
# -----------------------------------------------------------------
@property
def attenuation_origins(self):
return self.observation_maps_collection.get_attenuation_origins(flatten=True)
# -----------------------------------------------------------------
@property
def old_methods(self):
return self.observation_maps_collection.get_old_methods(flatten=False)
# -----------------------------------------------------------------
@property
def old_map_methods(self):
#return self.old_methods[self.model_old_map_name] # for flattened
#return find_value_for_unique_key_nested(self.old_methods, self.model_old_map_name)
return self.old_methods["disk"][self.model_old_map_name]
# -----------------------------------------------------------------
@property
def old_origins(self):
return self.observation_maps_collection.get_old_origins(flatten=False)
# -----------------------------------------------------------------
@property
def old_map_origins(self):
#return self.old_origins[self.model_old_map_name] # for flattened
#return find_value_for_unique_key_nested(self.old_origins, self.model_old_map_name)
return self.old_origins["disk"][self.model_old_map_name]
# -----------------------------------------------------------------
@property
def old_map_method_and_name(self):
return "disk", self.model_old_map_name
# -----------------------------------------------------------------
@property
def young_methods(self):
return self.observation_maps_collection.get_young_methods(flatten=False)
# -----------------------------------------------------------------
@property
def young_map_methods(self):
#return self.young_methods[self.model_young_map_name]
return find_value_for_unique_key_nested(self.young_methods, self.model_young_map_name)
# -----------------------------------------------------------------
@property
def young_origins(self):
return self.observation_maps_collection.get_young_origins(flatten=False)
# -----------------------------------------------------------------
@property
def young_map_origins(self):
#return self.young_origins[self.model_young_map_name]
return find_value_for_unique_key_nested(self.young_origins, self.model_young_map_name)
# -----------------------------------------------------------------
@property
def young_map_method_and_name(self):
"""
Find the method and map name of the model's young stellar map.
:return:
"""
keys = find_keys_for_unique_key_nested(self.young_methods, self.model_young_map_name)
if len(keys) == 1:
method = None
map_name = keys[0]
elif len(keys) == 2:
method = keys[0]
map_name = keys[1]
else: raise ValueError("Something is wrong")
return method, map_name
# -----------------------------------------------------------------
@property
def ionizing_methods(self):
return self.observation_maps_collection.get_ionizing_methods(flatten=False)
# -----------------------------------------------------------------
@property
def ionizing_map_methods(self):
#return self.ionizing_methods[self.model_ionizing_map_name]
return find_value_for_unique_key_nested(self.ionizing_methods, self.model_ionizing_map_name)
# -----------------------------------------------------------------
@property
def ionizing_origins(self):
return self.observation_maps_collection.get_ionizing_origins(flatten=False)
# -----------------------------------------------------------------
@property
def ionizing_map_origins(self):
#return self.ionizing_origins[self.model_ionizing_map_name]
return find_value_for_unique_key_nested(self.ionizing_origins, self.model_ionizing_map_name)
# -----------------------------------------------------------------
@property
def ionizing_map_method_and_name(self):
"""
Find the method and map name of the model's ionizing stellar map.
:return:
"""
keys = find_keys_for_unique_key_nested(self.ionizing_methods, self.model_ionizing_map_name)
if len(keys) == 1:
method = None
map_name = keys[0]
elif len(keys) == 2:
method = keys[0]
map_name = keys[1]
else: raise ValueError("Something is wrong")
return method, map_name
# -----------------------------------------------------------------
@property
def dust_methods(self):
return self.observation_maps_collection.get_dust_methods(flatten=False)
# -----------------------------------------------------------------
@property
def dust_map_methods(self):
#return self.dust_methods[self.model_dust_map_name]
try: return find_value_for_unique_key_nested(self.dust_methods, self.model_dust_map_name)
except ValueError: return find_value_for_unique_key_nested(self.dust_methods, self.model_dust_map_name.split("_", 1)[1])
# -----------------------------------------------------------------
@property
def dust_origins(self):
return self.observation_maps_collection.get_dust_origins(flatten=False)
# -----------------------------------------------------------------
@property
def dust_map_origins(self):
"""
Find the origins of the model's dust map.
:return:
"""
#return self.dust_origins[self.model_dust_map_name]
try: return find_value_for_unique_key_nested(self.dust_origins, self.model_dust_map_name)
except ValueError: return find_value_for_unique_key_nested(self.dust_origins, self.model_dust_map_name.split("_", 1)[1])
# -----------------------------------------------------------------
@property
def dust_map_method_and_name(self):
"""
Find the method and map name of the model's dust map.
:return:
"""
try: keys = find_keys_for_unique_key_nested(self.dust_methods, self.model_dust_map_name)
except ValueError: keys = find_keys_for_unique_key_nested(self.dust_methods, self.model_dust_map_name.split("_", 1)[1])
if len(keys) == 1:
method = None
map_name = keys[0]
elif len(keys) == 2:
method = keys[0]
map_name = keys[1]
else: raise ValueError("Something is wrong")
return method, map_name
# -----------------------------------------------------------------
class AnalysisRuns(object):
"""
This class represents the collection of analysis runs in a modeling directory.
"""
def __init__(self, modeling_path):
"""
The constructor ...
:param modeling_path:
"""
# Set the modeling path
self.modeling_path = modeling_path
# -----------------------------------------------------------------
@property
def modeling_data_path(self):
return fs.join(self.modeling_path, "data")
# -----------------------------------------------------------------
@property
def galaxy_info_path(self):
# Set the path to the galaxy info file
return fs.join(self.modeling_data_path, "info.dat")
# -----------------------------------------------------------------
@lazyproperty
def galaxy_info(self):
"""
Load the galaxy info table and convert it to an ordered dictionary.
:return:
"""
# Load the info table
table = tables.from_file(self.galaxy_info_path)
# To ordered dict
info = OrderedDict()
for name in table.colnames: info[name] = table[name][0]
# Return the info
return info
# -----------------------------------------------------------------
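# For example (illustrative): an info table with columns 'Hubble Type' and
# 'Hubble Stage' and a single row ('Sab', 2.0) gives
# galaxy_info = OrderedDict([('Hubble Type', 'Sab'), ('Hubble Stage', 2.0)]),
# from which the two properties below are read.
# -----------------------------------------------------------------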
@lazyproperty
def hubble_type(self):
return self.galaxy_info["Hubble Type"]
# -----------------------------------------------------------------
@lazyproperty
def hubble_stage(self):
return self.galaxy_info["Hubble Stage"]
# -----------------------------------------------------------------
@property
def analysis_path(self):
return fs.join(self.modeling_path, "analysis")
# -----------------------------------------------------------------
@lazyproperty
def names(self):
return fs.directories_in_path(self.analysis_path, returns="name")
# -----------------------------------------------------------------
@lazyproperty
def paths(self):
return fs.directories_in_path(self.analysis_path, returns="path")
# -----------------------------------------------------------------
def __len__(self):
"""
Return the number of analysis runs.
:return:
"""
return len(self.names)
# -----------------------------------------------------------------
@lazyproperty
def empty(self):
return sequences.is_empty(self.names)
# -----------------------------------------------------------------
@lazyproperty
def has_single(self):
return sequences.is_singleton(self.names)
# -----------------------------------------------------------------
@lazyproperty
def single_name(self):
return sequences.get_singleton(self.names)
# -----------------------------------------------------------------
@lazyproperty
def single_path(self):
return self.get_path(self.single_name)
# -----------------------------------------------------------------
def get_path(self, name):
"""
This function ...
:param name:
:return:
"""
return fs.join(self.analysis_path, name)
# -----------------------------------------------------------------
def load(self, name):
"""
This function ...
:param name:
:return:
"""
analysis_run_path = self.get_path(name)
if not fs.is_directory(analysis_run_path): raise ValueError("Analysis run '" + name + "' does not exist")
return AnalysisRun.from_path(analysis_run_path, hubble_stage=self.hubble_stage)
# -----------------------------------------------------------------
@lazyproperty
def single(self):
return AnalysisRun.from_path(self.single_path, hubble_stage=self.hubble_stage)
# -----------------------------------------------------------------
@lazyproperty
def last_name(self):
"""
This function ...
:return:
"""
#if self.empty: return None
#if self.has_single: return self.single_name
#return sorted(self.names)[-1]
return fs.name(self.last_path)
# -----------------------------------------------------------------
@lazyproperty
def last_path(self):
return fs.last_created_path(*self.paths)
# -----------------------------------------------------------------
@lazyproperty
def last(self):
return self.load(self.last_name)
# -----------------------------------------------------------------
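# Typical usage (hypothetical paths and run names):
#
#   runs = AnalysisRuns("/home/user/Modeling/NGC3031")
#   run = runs.last                # the most recently created analysis run
#   run = runs.load("run_2017_1")  # a specific run; raises ValueError if it doesn't exist
# -----------------------------------------------------------------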
class CachedAnalysisRun(AnalysisRunBase):
"""
This class ...
"""
def __init__(self, run_path, remote):
"""
The constructor ...
:param run_path:
:param remote:
"""
# Set attributes
self.path = run_path
self.remote = remote
# -----------------------------------------------------------------
@property
def galaxy_name(self):
return fs.name(self.original_modeling_path)
# -----------------------------------------------------------------
@classmethod
def from_path(cls, path, remote):
"""
This function ...
:param path:
:param remote:
:return:
"""
return cls(path, remote)
# -----------------------------------------------------------------
@lazyproperty
def info(self):
return AnalysisRunInfo.from_remote_file(self.info_path, self.remote)
# -----------------------------------------------------------------
@property
def original_path(self):
return self.info.path
# -----------------------------------------------------------------
@property
def original_analysis_path(self):
return fs.directory_of(self.original_path)
# -----------------------------------------------------------------
@property
def original_modeling_path(self):
return fs.directory_of(self.original_analysis_path)
# -----------------------------------------------------------------
@lazyproperty
def config(self):
return Configuration.from_remote_file(self.config_path, self.remote)
# -----------------------------------------------------------------
@lazyproperty
def heating_config(self):
return Configuration.from_remote_file(self.heating_config_path, self.remote)
# -----------------------------------------------------------------
@lazyproperty
def wavelength_grid(self):
return WavelengthGrid.from_skirt_input(self.wavelength_grid_path, remote=self.remote)
# -----------------------------------------------------------------
@lazyproperty
def dust_grid(self):
return load_grid(self.dust_grid_path, remote=self.remote)
# -----------------------------------------------------------------
@lazyproperty
def nfiles(self):
return self.remote.nfiles_in_path(self.path, recursive=True)
# -----------------------------------------------------------------
@lazyproperty
def disk_space(self):
return self.remote.directory_size(self.path)
# -----------------------------------------------------------------
@property
def has_output(self):
return self.remote.has_files_in_path(self.output_path)
# -----------------------------------------------------------------
@property
def has_logfile(self):
return self.remote.is_file(self.logfile_path)
# -----------------------------------------------------------------
@lazyproperty
def logfile(self):
return LogFile.from_remote_file(self.total_logfile_path, self.remote)
# -----------------------------------------------------------------
@property
def has_misc(self):
return self.remote.has_files_in_path(self.total_misc_path)
# -----------------------------------------------------------------
@property
def has_extracted(self):
return self.remote.has_files_in_path(self.total_extr_path)
# -----------------------------------------------------------------
@property
def has_progress(self):
return self.remote.is_file(self.progress_path)
# -----------------------------------------------------------------
@property
def has_timeline(self):
return self.remote.is_file(self.timeline_path)
# -----------------------------------------------------------------
@property
def has_memory(self):
return self.remote.is_file(self.memory_path)
# -----------------------------------------------------------------
@lazyproperty
def progress(self):
return ProgressTable.from_remote_file(self.progress_path, remote=self.remote)
# -----------------------------------------------------------------
@lazyproperty
def timeline(self):
return TimeLineTable.from_remote_file(self.timeline_path, remote=self.remote)
# -----------------------------------------------------------------
@lazyproperty
def memory(self):
return MemoryUsageTable.from_remote_file(self.memory_path, remote=self.remote)
# -----------------------------------------------------------------
@property
def has_plots(self):
return self.remote.has_files_in_path(self.plot_path)
# -----------------------------------------------------------------
@property
def has_attenuation(self):
return self.remote.is_directory(self.attenuation_path) and not self.remote.is_empty(self.attenuation_path)
# -----------------------------------------------------------------
@property
def has_colours(self):
return self.remote.is_directory(self.colours_path) and not self.remote.is_empty(self.colours_path)
# -----------------------------------------------------------------
@property
def colour_names(self):
return self.remote.files_in_path(self.colours_simulated_path, extension="fits", returns="name")
# -----------------------------------------------------------------
@property
def has_residuals(self):
return self.remote.is_directory(self.residuals_path) and not self.remote.is_empty(self.residuals_path)
# -----------------------------------------------------------------
@property
def residual_image_names(self):
return self.remote.files_in_path(self.residuals_path, extension="fits", not_contains=["significance"], returns="name")
# -----------------------------------------------------------------
@property
def has_maps_attenuation(self):
return self.remote.is_directory(self.attenuation_maps_path) and not self.remote.is_empty(self.attenuation_maps_path)
# -----------------------------------------------------------------
@property
def nattenuation_maps(self):
if self.remote.has_files_in_path(self.attenuation_maps_path, extension="fits"): return self.remote.nfiles_in_path(self.attenuation_maps_path, extension="fits")
else: return self.remote.nfiles_in_path(self.attenuation_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_colours(self):
return self.remote.is_directory(self.colour_maps_path) and not self.remote.is_empty(self.colour_maps_path)
# -----------------------------------------------------------------
@property
def ncolour_maps(self):
if self.remote.has_files_in_path(self.colour_maps_path, extension="fits"): return self.remote.nfiles_in_path(self.colour_maps_path, extension="fits")
else: return self.remote.nfiles_in_path(self.colour_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_dust(self):
return self.remote.is_directory(self.dust_maps_path) and not self.remote.is_empty(self.dust_maps_path)
# -----------------------------------------------------------------
@property
def ndust_maps(self):
if self.remote.has_files_in_path(self.dust_maps_path, extension="fits"): return self.remote.nfiles_in_path(self.dust_maps_path, extension="fits")
else: return self.remote.nfiles_in_path(self.dust_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_ionizing(self):
return self.remote.is_directory(self.ionizing_maps_path) and not self.remote.is_empty(self.ionizing_maps_path)
# -----------------------------------------------------------------
@property
def nionizing_maps(self):
if self.remote.has_files_in_path(self.ionizing_maps_path, extension="fits"): return self.remote.nfiles_in_path(self.ionizing_maps_path, extension="fits")
else: return self.remote.nfiles_in_path(self.ionizing_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_rgb(self):
return self.remote.is_directory(self.rgb_maps_path) and not self.remote.is_empty(self.rgb_maps_path)
# -----------------------------------------------------------------
@property
def has_maps_old(self):
return self.remote.is_directory(self.old_maps_path) and not self.remote.is_empty(self.old_maps_path)
# -----------------------------------------------------------------
@property
def nold_maps(self):
if self.remote.has_files_in_path(self.old_maps_path, extension="fits"): return self.remote.nfiles_in_path(self.old_maps_path, extension="fits")
else: return self.remote.nfiles_in_path(self.old_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_ssfr(self):
return self.remote.is_directory(self.ssfr_maps_path) and not self.remote.is_empty(self.ssfr_maps_path)
# -----------------------------------------------------------------
@property
def nssfr_maps(self):
if self.remote.has_files_in_path(self.ssfr_maps_path, extension="fits"): return self.remote.nfiles_in_path(self.ssfr_maps_path, extension="fits")
else: return self.remote.nfiles_in_path(self.ssfr_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_tir(self):
return self.remote.is_directory(self.tir_maps_path) and not self.remote.is_empty(self.tir_maps_path)
# -----------------------------------------------------------------
@property
def ntir_maps(self):
if self.remote.has_files_in_path(self.tir_maps_path, extension="fits"): return self.remote.nfiles_in_path(self.tir_maps_path, extension="fits")
else: return self.remote.nfiles_in_path(self.tir_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_maps_young(self):
return self.remote.is_directory(self.young_maps_path) and not self.remote.is_empty(self.young_maps_path)
# -----------------------------------------------------------------
@property
def nyoung_maps(self):
if self.remote.has_files_in_path(self.young_maps_path, extension="fits"): return self.remote.nfiles_in_path(self.young_maps_path, extension="fits")
else: return self.remote.nfiles_in_path(self.young_maps_path, extension="fits", recursive=True, recursion_level=1)
# -----------------------------------------------------------------
@property
def has_heating(self):
return self.remote.is_file(self.heating_config_path)
# -----------------------------------------------------------------
@property
def ski_file(self):
return SkiFile.from_remote_file(self.ski_file_path, self.remote)
# -----------------------------------------------------------------
@property
def has_dust_grid_simulation_logfile(self):
return self.remote.is_file(self.dust_grid_simulation_logfile_path)
# -----------------------------------------------------------------
@lazyproperty
def dust_grid_simulation_logfile(self):
return LogFile.from_remote_file(self.dust_grid_simulation_logfile_path, self.remote)
# -----------------------------------------------------------------
@lazyproperty
def dust_grid_tree(self):
"""
This function ...
:return:
"""
# Give debug message
log.debug("Loading the dust grid tree, this may take a while (depending on the number of nodes) ...")
# Return the tree
return DustGridTree.from_remote_file(self.dust_grid_tree_path, self.remote)
# -----------------------------------------------------------------
@property
def remote_script_paths(self):
return self.remote.files_in_path(self.path, extension="sh")
# -----------------------------------------------------------------
def get_remote_script_commands(self):
"""
Parse the remote execution scripts (*.sh) in the cached run directory into commands per host ID.
:return:
"""
commands = dict()
# Loop over the script paths
for path in self.remote_script_paths:
# Get host ID
host_id = fs.strip_extension(fs.name(path))
lines = []
# Open the file
for line in self.remote.read_lines(path):
if line.startswith("#"): continue
if not line.strip(): continue
lines.append(line)
# Set the commands
commands[host_id] = lines
# Return the commands
return commands
# -----------------------------------------------------------------
def get_remote_script_commands_for_host(self, host_id):
"""
This function ...
:param host_id:
:return:
"""
commands = self.get_remote_script_commands()
if host_id in commands: return commands[host_id]
else: return []
# -----------------------------------------------------------------
def get_heating_ski_for_contribution(self, contribution):
"""
This function ...
:param contribution:
:return:
"""
return SkiFile.from_remote_file(self.ski_path_for_contribution(contribution), self.remote)
# -----------------------------------------------------------------
@property
def has_simulated_sed(self):
return self.remote.is_file(self.simulated_sed_path)
# -----------------------------------------------------------------
@lazyproperty
def simulated_sed(self):
return SED.from_skirt(self.simulated_sed_path, remote=self.remote)
# -----------------------------------------------------------------
@property
def has_simulated_fluxes(self):
return self.remote.is_file(self.simulated_fluxes_path)
# -----------------------------------------------------------------
@lazyproperty
def simulated_fluxes(self):
return ObservedSED.from_remote_file(self.simulated_fluxes_path, self.remote)
# -----------------------------------------------------------------
class CachedAnalysisRuns(AnalysisRunBase):
"""
This class ...
"""
def __init__(self, modeling_path, remote):
"""
The constructor ...
:param modeling_path:
:param remote:
"""
# Set attributes
self.modeling_path = modeling_path
self.remote = load_remote(remote, silent=True)
# -----------------------------------------------------------------
@property
def galaxy_name(self):
return fs.name(self.modeling_path)
# -----------------------------------------------------------------
@lazyproperty
def cache_directory_name(self):
return self.galaxy_name + "_analysis"
# -----------------------------------------------------------------
@lazyproperty
def cache_directory_path(self):
"""
The remote cache directory for this galaxy's analysis runs (created if needed).
:return:
"""
path = fs.join(self.remote.home_directory, self.cache_directory_name)
if not self.remote.is_directory(path): self.remote.create_directory(path)
return path
# -----------------------------------------------------------------
def get_cache_directory_path_run(self, run_name):
"""
The remote cache directory for the given analysis run.
:param run_name:
:return:
"""
path = fs.join(self.cache_directory_path, run_name)
#if not self.remote.is_directory(path): self.remote.create_directory(path)
return path
# -----------------------------------------------------------------
@lazyproperty
def names(self):
return self.remote.directories_in_path(self.cache_directory_path, returns="name")
# -----------------------------------------------------------------
def __len__(self):
"""
Return the number of cached analysis runs.
:return:
"""
return len(self.names)
# -----------------------------------------------------------------
@lazyproperty
def empty(self):
return sequences.is_empty(self.names)
# -----------------------------------------------------------------
@lazyproperty
def has_single(self):
return sequences.is_singleton(self.names)
# -----------------------------------------------------------------
@lazyproperty
def single_name(self):
return sequences.get_singleton(self.names)
# -----------------------------------------------------------------
@lazyproperty
def single_path(self):
return self.get_path(self.single_name)
# -----------------------------------------------------------------
def get_path(self, name):
"""
The remote path of the analysis run with the given name.
:param name:
:return:
"""
return self.get_cache_directory_path_run(name)
# -----------------------------------------------------------------
def load(self, name):
"""
This function ...
:param name:
:return:
"""
analysis_run_path = self.get_path(name)
if not self.remote.is_directory(analysis_run_path): raise ValueError("Analysis run '" + name + "' does not exist")
return CachedAnalysisRun.from_path(analysis_run_path, self.remote)
# -----------------------------------------------------------------
@lazyproperty
def single(self):
return CachedAnalysisRun.from_path(self.single_path, self.remote)
# -----------------------------------------------------------------
# ABSTRACT PROPERTIES FROM BASE CLASS?
@property
def colour_names(self):
return None
# -----------------------------------------------------------------
@property
def dust_grid_simulation_logfile(self):
return None
# -----------------------------------------------------------------
@property
def dust_grid_tree(self):
return None
# -----------------------------------------------------------------
@property
def has_dust_grid_simulation_logfile(self):
return None
# -----------------------------------------------------------------
@property
def has_heating(self):
return None
# -----------------------------------------------------------------
@property
def has_maps_attenuation(self):
return None
# -----------------------------------------------------------------
@property
def has_maps_colours(self):
return None
# -----------------------------------------------------------------
@property
def has_maps_dust(self):
return None
# -----------------------------------------------------------------
@property
def has_maps_ionizing(self):
return None
# -----------------------------------------------------------------
@property
def has_maps_old(self):
return None
# -----------------------------------------------------------------
@property
def has_maps_ssfr(self):
return None
# -----------------------------------------------------------------
@property
def has_maps_tir(self):
return None
# -----------------------------------------------------------------
@property
def has_maps_young(self):
return None
# -----------------------------------------------------------------
@property
def has_simulated_fluxes(self):
return None
# -----------------------------------------------------------------
@property
def has_simulated_sed(self):
return None
# -----------------------------------------------------------------
@property
def nattenuation_maps(self):
return None
# -----------------------------------------------------------------
@property
def ncolour_maps(self):
return None
# -----------------------------------------------------------------
@property
def ndust_maps(self):
return None
# -----------------------------------------------------------------
@property
def nionizing_maps(self):
return None
# -----------------------------------------------------------------
@property
def nold_maps(self):
return None
# -----------------------------------------------------------------
@property
def nssfr_maps(self):
return None
# -----------------------------------------------------------------
@property
def ntir_maps(self):
return None
# -----------------------------------------------------------------
@property
def nyoung_maps(self):
return None
# -----------------------------------------------------------------
def find_value_for_unique_key_nested(dictionary, key, allow_none=False):
"""
Search a nested dictionary for a unique key and return its value.
:param dictionary:
:param key:
:param allow_none:
:return:
"""
values = []
for key_i in dictionary:
# Sub-dict
if isinstance(dictionary[key_i], dict):
value = find_value_for_unique_key_nested(dictionary[key_i], key, allow_none=True)
if value is not None: values.append(value)
# Matches
elif key_i == key:
value = dictionary[key_i]
values.append(value)
if len(values) == 0 and not allow_none: raise ValueError("Key not found")
if len(values) > 1: raise ValueError("Not unique")
# Return the only value
if len(values) == 0: return None
else: return values[0]
# -----------------------------------------------------------------
def find_keys_for_unique_key_nested(dictionary, key, allow_none=False):
"""
Search a nested dictionary for a unique key and return the chain of keys leading to it.
:param dictionary:
:param key:
:param allow_none:
:return:
"""
keys = []
for key_i in dictionary:
# Sub-dict
if isinstance(dictionary[key_i], dict):
keys_subdict = find_keys_for_unique_key_nested(dictionary[key_i], key, allow_none=True)
if keys_subdict is not None:
keys.append([key_i] + keys_subdict)
# Matches
elif key_i == key: keys.append([key])
if len(keys) == 0 and not allow_none: raise ValueError("Key not found")
if len(keys) > 1: raise ValueError("Not unique")
if len(keys) == 0: return None
else: return keys[0]
# -----------------------------------------------------------------
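# A quick illustration of the two helpers above (values are made up):
#
#   methods = {"disk": {"map_a": ["m1"]}, "bulge": {"map_b": ["m2"]}}
#   find_value_for_unique_key_nested(methods, "map_b")  # -> ["m2"]
#   find_keys_for_unique_key_nested(methods, "map_b")   # -> ["bulge", "map_b"]
#   find_keys_for_unique_key_nested(methods, "map_c")   # -> raises ValueError("Key not found")
#
# This is what the *_map_method_and_name properties rely on: a two-element key
# chain is interpreted as (method, map_name); a single element means there is
# no method level for that map type.
# -----------------------------------------------------------------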
|
SKIRT/PTS
|
modeling/analysis/run.py
|
Python
|
agpl-3.0
| 116,812
|
[
"Galaxy"
] |
04395402a5d0c9530a7c30dbeb8cfede1629b1efc86d06719a5b5d5ea8005d13
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import moose
moose.Neutral('/model')
compt = moose.CubeMesh('/model/Compart')
compt.volume = 1.6667e-21
s = moose.Pool('/model/Compart/S')
s.concInit = 0.3
p = moose.Pool('/model/Compart/P')
r = moose.Reac('/model/Compart/Reac')
moose.connect(r, 'sub', s, 'reac')
moose.connect(r, 'prd', p, 'reac')
bf = moose.BufPool('/model/Compart/BufPool')
f = moose.Function('/model/Compart/BufPool/function')
moose.connect(f, 'valueOut', bf, 'setN')
numVariables = f.numVars
f.numVars += 1
expr = ""
expr = (f.expr + '+' + 'x' + str(numVariables))
expr = expr.lstrip("0 +")
expr = expr.replace(" ", "")
f.expr = expr
moose.connect(s, 'nOut', f.x[numVariables], 'input')
bf1 = moose.BufPool('/model/Compart/BufPool1')
f1 = moose.Function('/model/Compart/BufPool1/func')
moose.connect(f1, 'valueOut', bf1, 'setN')
numVariables = f1.numVars
expr = ""
expr = (f1.expr + '+' + 'x' + str(numVariables))
expr = expr.lstrip("0 +")
expr = expr.replace(" ", "")
f1.expr = expr
moose.connect(s, 'nOut', f1.x[numVariables], 'input')
compts = moose.wildcardFind('/model/##[ISA=ChemCompt]')
print(" f name= ",f.name, f.tick)
print(" f1 name= ",f1.name, f1.tick)
for compt in compts:
ksolve = moose.Ksolve( compt.path+'/ksolve' )
stoich = moose.Stoich( compt.path+'/stoich' )
stoich.compartment = compt
stoich.ksolve = ksolve
stoich.path = compt.path+"/##"
print("After solver is set")
print(" f name= ",f.name,f.tick)
print(" f1 name= ", f1.name, f1.tick)
|
dharmasam9/moose-core
|
tests/issues/issue_45.py
|
Python
|
gpl-3.0
| 1,514
|
[
"MOOSE"
] |
7ab02a22aef598c41b43f234649135c9d8d79355add1acebba503482ce75a204
|
# -*- coding: utf-8 -*-
#
# Bio-Formats documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 29 15:42:49 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import re
import subprocess
def popen(args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
copy = os.environ.copy()
shell = (sys.platform == "win32")
return subprocess.Popen(args,
env=copy,
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=shell)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.extlinks']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Bio-Formats'
title = project +u' Documentation'
author = u'The Open Microscopy Environment'
copyright = u'2000-2012, ' + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
try:
if "BF_RELEASE" in os.environ:
release = os.environ.get('BF_RELEASE')
else:
p = popen(['git','describe'])
tag = p.communicate()
split_tag = re.split(r"^(v)?(.*?)(-[0-9]+)?((-)g(.*?))?$", tag[0])
# The full version, including alpha/beta/rc tags.
release = split_tag[2]
split_release = re.split(r"^([0-9]\.[0-9])(\.[0-9]+)(.*?)$", release)
# The short X.Y version.
version = split_release[1]
except Exception:
version = 'UNKNOWN'
release = 'UNKNOWN'
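# Worked example (hypothetical tag): if 'git describe' prints 'v4.4.8-12-gabc1234',
# the first split yields release = '4.4.8' and the second yields version = '4.4';
# a plain 'v4.4.8' tag gives the same result with the commit-count and hash
# groups left empty.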
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
if "OMERODOC_URI" in os.environ:
omerodoc_uri = os.environ.get('OMERODOC_URI')
else:
omerodoc_uri = 'http://www.openmicroscopy.org/site/support/omero4/'
if "SOURCE_BRANCH" in os.environ:
source_branch = os.environ.get('SOURCE_BRANCH')
else:
source_branch = 'develop'
if "JENKINS_JOB" in os.environ:
jenkins_job = os.environ.get('JENKINS_JOB')
else:
jenkins_job = 'BIOFORMATS-trunk'
bf_github_root = 'https://github.com/openmicroscopy/bioformats/'
bf_github = bf_github_root + 'blob/' + source_branch + '/'
jenkins_root = 'http://hudson.openmicroscopy.org.uk/'
jenkins_job_root = jenkins_root + 'job/' + jenkins_job + '/'
extlinks = {
'wiki' : ('http://trac.openmicroscopy.org.uk/ome/wiki/'+ '%s', ''),
'ticket' : ('http://trac.openmicroscopy.org.uk/ome/ticket/'+ '%s', '#'),
'snapshot' : ('http://cvs.openmicroscopy.org.uk/snapshots/'+ '%s', ''),
'plone' : ('http://www.openmicroscopy.org/site/'+ '%s', ''),
'oo' : ('http://www.openmicroscopy.org/' + '%s', ''),
'doi' : ('http://dx.doi.org/' + '%s', ''),
'source' : (bf_github + '%s', ''),
'bfreader' : (bf_github + 'components/bio-formats/src/loci/formats/in/' + '%s', ''),
'scifioreader' : (bf_github + 'components/scifio/src/loci/formats/in/' + '%s', ''),
'bfwriter' : (bf_github + 'components/bio-formats/src/loci/formats/out/' + '%s', ''),
'scifiowriter' : (bf_github + 'components/scifio/src/loci/formats/out/' + '%s', ''),
'jenkins' : (jenkins_job_root + '%s', ''),
'javadoc' : (jenkins_job_root + 'javadoc/' + '%s', ''),
'mailinglist' : ('http://lists.openmicroscopy.org.uk/mailman/listinfo/' + '%s', ''),
'forum' : ('http://www.openmicroscopy.org/community/' + '%s', ''),
'omerodoc': (omerodoc_uri + '%s', ''),
'bf_plone' : ('http://www.openmicroscopy.org/site/products/bio-formats/%s/', ''),
}
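# With sphinx.ext.extlinks, each entry above defines a reST role; for instance
# (illustrative targets), :ticket:`12345` renders as '#12345' linking to
# http://trac.openmicroscopy.org.uk/ome/ticket/12345, and :source:`readme.txt`
# links to that file on the configured source branch on GitHub.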
rst_epilog = """
.. _Hibernate: http://www.hibernate.org
.. _ZeroC: http://www.zeroc.com
.. _Ice: http://www.zeroc.com
.. _OME-TIFF: https://www.openmicroscopy.org/site/support/file-formats/ome-tiff
.. _OME-XML: http://www.openmicroscopy.org/site/support/file-formats/the-ome-xml-file
.. |Poor| image:: /images/crystal-1.png
.. |Fair| image:: /images/crystal-2.png
.. |Good| image:: /images/crystal-3.png
.. |Very Good| image:: /images/crystal-4.png
.. |Outstanding| image:: /images/crystal-5.png
.. |no| image:: /images/crystal-no.png
.. |yes| image:: /images/crystal-yes.png
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = { '**' : ['globalbftoc.html', 'pagetoc.html',
'relations.html', 'searchbox.html', 'sourcelink.html'] }
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bio-Formatsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
target = project + '-' + release + '.tex'
latex_documents = [
(master_doc, target, title, author, 'manual'),
]
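# For example, with project = 'Bio-Formats' and release = '4.4.9' (values
# are illustrative; both variables are set earlier in this file), the LaTeX
# builder writes its output to Bio-Formats-4.4.9.tex.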
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = True
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
latex_show_urls = 'footnote'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'OMERO', title, author, 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, title, author, 'omedocs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for the linkcheck builder ----------------------------------------
# Regular expressions that match URIs that should not be checked when doing a linkcheck build
linkcheck_ignore = ['http://www.openmicroscopy.org/site/support/faq']
|
ximenesuk/bioformats
|
docs/sphinx/conf.py
|
Python
|
gpl-2.0
| 11,342
|
[
"CRYSTAL"
] |
dcee56805eaba49f273b6b981985fad8b855ff9841efd4668d8769ba7efabe4d
|
# This component calculates heating and cooling degree hours
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Mostapha Sadeghipour Roudsari <Sadeghipour@gmail.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Calculates heating and cooling degree-hours.
Degree-hours are defined as the difference between the base temperature and the average ambient outside air temperature multiplied by the number of hours that this difference condition exists.
-
Provided by Ladybug 0.0.61
Args:
_hourlyDryBulbTemperature: Annual dry bulb temperature from the Import epw component (in degrees Celsius).
_coolingBaseTemperature_: Base temperature for cooling (in degrees Celsius). Default is set to 23.3C, but this can be much lower if the analysis is for a building with high internal heat gain.
_heatingBaseTemperature_: Base temperature for heating (in degrees Celsius). Default is set to 18.3C, but this can be much lower if the analysis is for a building with high internal heat gain or good insulation.
Returns:
readMe!: A summary of the inputs.
hourly_coolingDegHours: Cooling degree-hours for each hour of the year. For visualizations over the whole year, connect this to the Grasshopper chart/graph component.
hourly_heatingDegHours: Heating degree-hours for each hour of the year. For visualizations over the whole year, connect this to the Grasshopper chart/graph component.
daily_coolingDegHours: Cooling degree-hours summed for each day of the year. For visualizations over the whole year, connect this to the Grasshopper chart/graph component.
daily_heatingDegHours: Heating degree-hours summed for each day of the year. For visualizations over the whole year, connect this to the Grasshopper chart/graph component.
monthly_coolingDegHours: Cooling degree-hours summed for each month of the year.
monthly_heatingDegHours: Heating degree-hours summed for each month of the year.
annual_coolingDegHours: The total cooling degree-hours for the entire year.
annual_heatingDegHours: The total heating degree-hours for the entire year.
"""
ghenv.Component.Name = "Ladybug_CDH_HDH"
ghenv.Component.NickName = "CDH_HDH"
ghenv.Component.Message = 'VER 0.0.61\nNOV_05_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "1 | AnalyzeWeatherData"
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "0"
except: pass
import scriptcontext as sc
from clr import AddReference
AddReference('Grasshopper')
import Grasshopper.Kernel as gh
# read the inputs, falling back to the defaults when they are missing
try: coolingSetPoint = float(_coolingBaseTemperature_)
except: coolingSetPoint = 23.3
print 'Cooling base temperature: ' + str(coolingSetPoint) + ' C.'
coolingSetBack = coolingSetPoint
try: heatingSetPoint = float(_heatingBaseTemperature_)
except: heatingSetPoint = 18.3
print 'Heating base temperature: ' + str(heatingSetPoint) + ' C.'
heatingSetBack = heatingSetPoint
# occupation hours are not exposed as inputs on this component, so these
# always fall back to the full 24-hour day (hours 0-23)
try: startOfWorkingHours = float(occupationStartHour - 1)
except: startOfWorkingHours = 0
try: endOfWorkingHours = float(occupationEndHour - 1)
except: endOfWorkingHours = 23
def main(coolingSetPoint, heatingSetPoint, coolingSetBack, heatingSetBack, startOfWorkingHours, endOfWorkingHours):
# import the classes
if sc.sticky.has_key('ladybug_release'):
try:
if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Ladybug to use this compoent." + \
"Use updateLadybug component to update userObjects.\n" + \
"If you have already updated userObjects drag Ladybug_Ladybug component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
lb_preparation = sc.sticky["ladybug_Preparation"]()
# copy the custom code here
# check the input data
try:
hourlyDBTemp = _hourlyDryBulbTemperature
if hourlyDBTemp[2] == 'Dry Bulb Temperature' and hourlyDBTemp[4] == 'Hourly': checkData = True
else: checkData = False
except: checkData = False
if checkData:
# separate data
indexList, listInfo = lb_preparation.separateList(hourlyDBTemp, lb_preparation.strToBeFound)
# separate the hourly temperature values from the header information
separatedLists = []
for i in range(len(indexList)-1):
selList = []
[selList.append(float(x)) for x in hourlyDBTemp[indexList[i]+7:indexList[i+1]]]
separatedLists.append(selList)
hourly_coolingDegHours = []
hourly_heatingDegHours = []
daily_coolingDegHours = []
daily_heatingDegHours = []
monthly_coolingDegHours = []
monthly_heatingDegHours = []
annual_coolingDegHours = []
annual_heatingDegHours = []
for l in range(len(separatedLists)):
[hourly_coolingDegHours.append(item) for item in listInfo[l][:2]]
hourly_coolingDegHours.append('Cooling Degree Hours')
hourly_coolingDegHours.append('Degree Hours')
hourly_coolingDegHours.append('Hourly')
[hourly_coolingDegHours.append(item) for item in listInfo[l][5:7]]
[hourly_heatingDegHours.append(item) for item in listInfo[l][:2]]
hourly_heatingDegHours.append('Heating Degree Hours')
hourly_heatingDegHours.append('Degree Hours')
hourly_heatingDegHours.append('Hourly')
[hourly_heatingDegHours.append(item) for item in listInfo[l][5:7]]
[daily_coolingDegHours.append(item) for item in listInfo[l][:2]]
daily_coolingDegHours.append('Cooling Degree Hours')
daily_coolingDegHours.append('Degree Hours')
daily_coolingDegHours.append('Daily')
[daily_coolingDegHours.append(item) for item in listInfo[l][5:7]]
[daily_heatingDegHours.append(item) for item in listInfo[l][:2]]
daily_heatingDegHours.append('Heating Degree Hours')
daily_heatingDegHours.append('Degree Hours')
daily_heatingDegHours.append('Daily')
[daily_heatingDegHours.append(item) for item in listInfo[l][5:7]]
[monthly_coolingDegHours.append(item) for item in listInfo[l][:2]]
monthly_coolingDegHours.append('Cooling Degree Hours')
monthly_coolingDegHours.append('Degree Hours')
monthly_coolingDegHours.append('Monthly')
[monthly_coolingDegHours.append(item) for item in listInfo[l][5:7]]
[monthly_heatingDegHours.append(item) for item in listInfo[l][:2]]
monthly_heatingDegHours.append('Heating Degree Hours')
monthly_heatingDegHours.append('Degree Hours')
monthly_heatingDegHours.append('Monthly')
[monthly_heatingDegHours.append(item) for item in listInfo[l][5:7]]
[annual_coolingDegHours.append(item) for item in listInfo[l][:2]]
annual_coolingDegHours.append('Cooling Degree Hours')
annual_coolingDegHours.append('Degree Hours')
annual_coolingDegHours.append('Annual')
[annual_coolingDegHours.append(item) for item in listInfo[l][5:7]]
[annual_heatingDegHours.append(item) for item in listInfo[l][:2]]
annual_heatingDegHours.append('Heating Degree Hours')
annual_heatingDegHours.append('Degree Hours')
annual_heatingDegHours.append('Annual')
[annual_heatingDegHours.append(item) for item in listInfo[l][5:7]]
hourlyTemperature = separatedLists[l]
# for each hour based on hourly temperature data
for hour, temp in enumerate(hourlyTemperature):
if float(temp) < heatingSetPoint:
hourly_heatingDegHours.append(heatingSetPoint - float(temp))
else: hourly_heatingDegHours.append(0)
if coolingSetPoint < float(temp):
hourly_coolingDegHours.append(float(temp) - coolingSetPoint)
else: hourly_coolingDegHours.append(0)
# for each day based on hourly degree hours
for day in range (int(len(hourlyTemperature)/24)):
# the offset of 7 skips the first 7 members of the list, which are header information
daily_heatingDegHours.append(sum(hourly_heatingDegHours[(day * 24) + 7:((day+1)*24) + 7]))
daily_coolingDegHours.append(sum(hourly_coolingDegHours[(day * 24) + 7:((day+1)*24) + 7]))
numOfDays = lb_preparation.numOfDays
for month in range(len(numOfDays)- 1):
monthly_heatingDegHours.append(sum(daily_heatingDegHours[numOfDays[month] + 7: numOfDays[month + 1]+ 7]))
monthly_coolingDegHours.append(sum(daily_coolingDegHours[numOfDays[month] + 7: numOfDays[month + 1]+ 7]))
annual_heatingDegHours.append(sum(monthly_heatingDegHours[7:]))
annual_coolingDegHours.append(sum(monthly_coolingDegHours[7:]))
return hourly_coolingDegHours, hourly_heatingDegHours, daily_coolingDegHours, daily_heatingDegHours, monthly_coolingDegHours, monthly_heatingDegHours, annual_coolingDegHours, annual_heatingDegHours
elif hourlyDBTemp[0] == 'Connect temperature here':
print 'Connect annual hourly dry bulb temperature'
return -1
else:
warning = 'Please provide annual hourly dry bulb temperature!'
print warning
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
else:
print "You should first let the Ladybug fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let the Ladybug fly...")
return -1
result = main(coolingSetPoint, heatingSetPoint, coolingSetBack, heatingSetBack, startOfWorkingHours, endOfWorkingHours)
if result != -1:
    hourly_coolingDegHours, hourly_heatingDegHours, daily_coolingDegHours, daily_heatingDegHours, monthly_coolingDegHours, monthly_heatingDegHours, annual_coolingDegHours, annual_heatingDegHours = result
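# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of the component above) of the
# core degree-hour bookkeeping, assuming a plain list of hourly temperatures
# in degrees Celsius with no Ladybug header items. Each hour contributes the
# positive difference between the base temperature and the ambient value.
def degree_hours(hourly_temps, heating_base=18.3, cooling_base=23.3):
    hdh = sum(max(heating_base - t, 0.0) for t in hourly_temps)
    cdh = sum(max(t - cooling_base, 0.0) for t in hourly_temps)
    return hdh, cdh
# degree_hours([15.0, 20.0, 26.0]) -> (3.3, 2.7), approximately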
|
boris-p/ladybug
|
src/Ladybug_CDH_HDH.py
|
Python
|
gpl-3.0
| 11,925
|
[
"EPW"
] |
29132233a9b8cfe0b9b0f44f689857c4de908eab6ccbc51e3d66ba1f5de709b7
|
import unittest
import subprocess
import os
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
from io import BytesIO as StringIO
import utils
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
test_dir = utils.set_search_paths(TOPDIR)
from allosmod.util import check_output # noqa: E402
import allosmod.get_contacts # noqa: E402
import allosmod.get_ss # noqa: E402
import allosmod.edit_restraints # noqa: E402
class TruncatedGaussianRestraint(allosmod.edit_restraints.Restraint):
def handle_parameters(self, params):
self.delE, self.slope, self.scl_delx = [float(x) for x in params[:3]]
del params[:3]
self.weights = [float(x) for x in params[:self.modal]]
self.means = [float(x) for x in params[self.modal:self.modal*2]]
self.stdevs = [float(x) for x in params[self.modal*2:self.modal*3]]
class MockRestraintEditor(allosmod.edit_restraints.RestraintEditor):
def __init__(self):
sigmas = allosmod.edit_restraints.Sigmas(2, 2.0, 3.0, 4.0)
allosmod.edit_restraints.RestraintEditor.__init__(
self, "dummyoth.rsr", "dummyas.rsr", "dummypdbfile", ["contact"],
"dummyatomlist", sigmas, 10.0, 0.2, None, False, False)
self.contacts = allosmod.edit_restraints.ContactMap()
self.beta_structure = {}
self.HETscale *= 4.0 # fail if HETscale not in parent
def check_parse_restraint(self, r, delEmax=10.0):
from allosmod.edit_restraints import TruncatedGaussianParameters
r_from_form = {3: allosmod.edit_restraints.GaussianRestraint,
4: allosmod.edit_restraints.MultiGaussianRestraint,
7: allosmod.edit_restraints.CosineRestraint,
9: allosmod.edit_restraints.BinormalRestraint,
10: allosmod.edit_restraints.SplineRestraint,
50: TruncatedGaussianRestraint}
tgparams = TruncatedGaussianParameters(delEmax, delEmaxNUC=20.0,
slope=3.0, scl_delx=4.0,
breaks={})
fh = StringIO()
self.parse_restraint(tgparams, r, fh)
fh.seek(0)
for line in fh:
if line.startswith('R'):
typ, form, rest = line.split(None, 2)
yield r_from_form[int(form)](line, r.atoms)
class Tests(unittest.TestCase):
def test_bad(self):
"""Test wrong arguments to edit_restraints"""
for args in ([], [''] * 6):
check_output(['allosmod', 'edit_restraints'] + args,
stderr=subprocess.STDOUT, retcode=2)
check_output([sys.executable, '-m',
'allosmod.edit_restraints'] + args,
stderr=subprocess.STDOUT, retcode=2)
def test_setup_atoms(self):
"""Test setup_atoms()"""
import modeller
from allosmod.edit_restraints import RestraintEditor
sigmas = allosmod.edit_restraints.Sigmas(1, 1.0, 2.0, 3.0)
with open('atomlistASRS', 'w') as fh:
for i in range(64):
fh.write("%d %s\n" % (i+1, "AS" if i < 10 else "RS"))
with open('break.dat', 'w') as fh:
fh.write('1 20')
env = modeller.environ()
env.io.hetatm = True
e = RestraintEditor(
'test.rsr', 'test.rsr',
os.path.join(test_dir, 'input', 'test_editrsr.pdb'),
['test.pdb'], 'atomlistASRS', sigmas, 10.0, 0.1,
'break.dat', False, False)
class Residue(object):
pass
def mock_get_cont(fname, rcut):
ri = Residue()
ri.index = 1
rj = Residue()
rj.index = 4
yield ri, rj, 10.0
def mock_get_ss(pdb_file):
return []
with utils.mock_method(allosmod.get_contacts, 'get_contacts',
mock_get_cont):
with utils.mock_method(allosmod.get_ss, 'get_ss', mock_get_ss):
e.setup_atoms(env)
contacts = sorted(e.contacts.keys())
# Should have the 1-4 interaction from get_contacts, plus the two
# nucleic acids (#6 and #7) should interact with everything
self.assertEqual(contacts, [(1, 4), (1, 6), (1, 7), (2, 6), (2, 7),
(3, 6), (3, 7), (4, 6), (4, 7), (5, 6),
(5, 7), (6, 6), (6, 7), (7, 7)])
self.assertEqual(e.breaks, {1: 20.0})
# First 10 atoms should be allosteric site
self.assertEqual([a.isAS for a in e.atoms], [True]*10 + [False]*54)
# One CA and CB for each residue
self.assertEqual(len([a for a in e.atoms if a.isCA]), 4)
self.assertEqual(len([a for a in e.atoms if a.isCB]), 4)
self.assertTrue(e.atoms[1].isCA)
self.assertTrue(e.atoms[2].isCB)
self.assertEqual(len([a for a in e.atoms if a.isSC]), 5)
self.assertTrue(e.atoms[3].isSC)
self.assertEqual(len([a for a in e.atoms if a.isNUC]), 39)
self.assertEqual(len([a for a in e.atoms if a.torestr]), 18)
os.unlink('break.dat')
os.unlink('atomlistASRS')
def test_sigmas(self):
"""Test Sigmas class"""
class Atom(object):
def __init__(self, isAS, isSC):
self.isAS, self.isSC = isAS, isSC
# Test one template, allosteric site, BB-BB
sigmas = allosmod.edit_restraints.Sigmas(1, 1.0, 2.0, 3.0)
g = sigmas.get((Atom(isAS=True, isSC=False),
Atom(isAS=True, isSC=False)))
self.assertAlmostEqual(g, 1.0 * 1.0, places=1)
# Test one template, allosteric site, SC-BB
sigmas = allosmod.edit_restraints.Sigmas(1, 1.0, 2.0, 3.0)
g = sigmas.get((Atom(isAS=True, isSC=True),
Atom(isAS=True, isSC=False)))
self.assertAlmostEqual(g, 1.5 * 1.0, places=1)
g = sigmas.get((Atom(isAS=True, isSC=False),
Atom(isAS=True, isSC=True)))
self.assertAlmostEqual(g, 1.5 * 1.0, places=1)
# Test one template, allosteric site, SC-SC
sigmas = allosmod.edit_restraints.Sigmas(1, 1.0, 2.0, 3.0)
g = sigmas.get((Atom(isAS=True, isSC=True),
Atom(isAS=True, isSC=True)))
self.assertAlmostEqual(g, 1.5 * 1.5 * 1.0, places=1)
# Test two templates, allosteric site, SC-SC
sigmas = allosmod.edit_restraints.Sigmas(2, 1.0, 2.0, 3.0)
g = sigmas.get((Atom(isAS=True, isSC=True),
Atom(isAS=True, isSC=True)))
self.assertAlmostEqual(g, 1.0, places=1)
g = sigmas.get_scaled((Atom(isAS=True, isSC=True),
Atom(isAS=True, isSC=True)))
self.assertAlmostEqual(g, 4.0, places=1)
# Test one template, regulated site, SC-SC
sigmas = allosmod.edit_restraints.Sigmas(1, 1.0, 2.0, 3.0)
g = sigmas.get((Atom(isAS=False, isSC=True),
Atom(isAS=False, isSC=True)))
self.assertAlmostEqual(g, 1.5 * 1.5 * 2.0, places=1)
# Test one template, interface, SC-SC
sigmas = allosmod.edit_restraints.Sigmas(1, 1.0, 2.0, 3.0)
g = sigmas.get((Atom(isAS=True, isSC=True),
Atom(isAS=False, isSC=True)))
self.assertAlmostEqual(g, 1.5 * 1.5 * 3.0, places=1)
g = sigmas.get((Atom(isAS=False, isSC=True),
Atom(isAS=True, isSC=True)))
self.assertAlmostEqual(g, 1.5 * 1.5 * 3.0, places=1)
def test_truncated_gaussian_parameters(self):
"""Test TruncatedGaussianParameters class"""
from allosmod.edit_restraints import TruncatedGaussianParameters
class Residue(object):
pass
class Atom(object):
def __init__(self, ri):
self.a = self
self.a.residue = Residue()
self.a.residue.index = ri
tgparams = TruncatedGaussianParameters(delEmax=1.0, delEmaxNUC=2.0,
slope=3.0, scl_delx=4.0,
breaks={})
# nuc
e = tgparams.get_dele(None, local=False, nuc=True)
self.assertAlmostEqual(e, 2.0, places=1)
# non-local, no breaks
e = tgparams.get_dele((Atom(1), Atom(2)), local=False, nuc=False)
self.assertAlmostEqual(e, 1.0, places=1)
# local, no breaks
e = tgparams.get_dele((Atom(1), Atom(2)), local=True, nuc=False)
self.assertAlmostEqual(e, 10.0, places=1)
# breaks
tgparams = TruncatedGaussianParameters(delEmax=2.0, delEmaxNUC=6.0,
slope=3.0, scl_delx=4.0,
breaks={1: 20.0})
for local in True, False:
e = tgparams.get_dele((Atom(1), Atom(2)), local=local, nuc=False)
self.assertAlmostEqual(e, 40.0, places=1)
def test_restraint(self):
"""Test Restraint base class"""
from allosmod.edit_restraints import Restraint
class Atom(object):
def __init__(self, ind):
self.index = ind
self.a = self
self.assertRaises(ValueError, Restraint,
"R 3 1 9 12 2 2 1 3 2 10.00 20.00",
[Atom(i) for i in range(1, 10)])
def make_restraint(self, modify_atom_func, args, natom, cls, fmt):
from allosmod.edit_restraints import Atom
class ModellerResidue(object):
hetatm = False
class ModellerAtom(object):
def __init__(self, ind):
self.index = ind
self.residue = ModellerResidue()
atoms = [Atom(ModellerAtom(i+1)) for i in range(natom)]
if modify_atom_func:
modify_atom_func(atoms, args)
r = cls(fmt % (len(atoms),
' '.join('%d' % (x+1) for x in range(natom))), atoms)
return r
def make_gaussian_restraint(self, modify_atom_func, args=None, natom=2):
from allosmod.edit_restraints import GaussianRestraint
return self.make_restraint(modify_atom_func, args, natom,
GaussianRestraint,
"R 3 1 9 12 %d 2 1 %s 10.00 20.00")
def make_multi_gaussian_restraint(self, modify_atom_func, args=None,
natom=2):
from allosmod.edit_restraints import MultiGaussianRestraint
return self.make_restraint(modify_atom_func, args, natom,
MultiGaussianRestraint,
"R 4 2 9 12 %d 6 1 %s 0.8 0.2 10.00 "
"20.00 5.0 8.0")
def make_cosine_restraint(self, modify_atom_func, args=None, natom=2):
from allosmod.edit_restraints import CosineRestraint
return self.make_restraint(modify_atom_func, args, natom,
CosineRestraint,
"R 7 2 9 12 %d 2 1 %s 20.0 30.0")
def make_spline_restraint(self, modify_atom_func=None, args=None, natom=2):
from allosmod.edit_restraints import SplineRestraint
return self.make_restraint(modify_atom_func, args, natom,
SplineRestraint,
"R 10 22 3 13 %d 3 1 %s x y z")
def test_is_intrahet(self):
"""Test Restraint.is_intrahet()"""
def set_hetatm(atoms, hetatm):
for a, h in zip(atoms, hetatm):
a.a.residue.hetatm = h
r = self.make_gaussian_restraint(set_hetatm, [True, True])
self.assertTrue(r.is_intrahet())
r = self.make_gaussian_restraint(set_hetatm, [True, False])
self.assertFalse(r.is_intrahet())
r = self.make_gaussian_restraint(set_hetatm, [False, False])
self.assertFalse(r.is_intrahet())
def test_is_ca_cb_interaction(self):
"""Test Restraint.is_ca_cb_interaction()"""
def modify_atoms(atoms, nuc_ca_cb):
for a, n in zip(atoms, nuc_ca_cb):
a.isNUC, a.isCA, a.isCB = n
# No atom is CA, CB, or a nucleotide
r = self.make_gaussian_restraint(modify_atoms, [[False, False, False],
[True, True, True]])
self.assertFalse(r.is_ca_cb_interaction())
# nuc-nuc interaction
r = self.make_gaussian_restraint(modify_atoms, [[True, False, False],
[True, False, False]])
self.assertTrue(r.is_ca_cb_interaction())
# CA-nuc interaction
r = self.make_gaussian_restraint(modify_atoms, [[True, False, False],
[False, True, False]])
self.assertTrue(r.is_ca_cb_interaction())
# CA-CB interaction
r = self.make_gaussian_restraint(modify_atoms, [[False, False, True],
[False, True, False]])
self.assertTrue(r.is_ca_cb_interaction())
def test_is_sidechain_sidechain_interaction(self):
"""Test Restraint.is_sidechain_sidechain_interaction()"""
def modify_atoms(atoms, sc_cb):
for a, s in zip(atoms, sc_cb):
a.isSC, a.isCB = s
# SC-SC interaction
r = self.make_gaussian_restraint(modify_atoms, [[True, False],
[True, False]])
self.assertTrue(r.is_sidechain_sidechain_interaction())
# SC-CB interaction
r = self.make_gaussian_restraint(modify_atoms, [[True, False],
[False, True]])
self.assertTrue(r.is_sidechain_sidechain_interaction())
# BB-BB interaction
r = self.make_gaussian_restraint(modify_atoms, [[False, False],
[False, False]])
self.assertFalse(r.is_sidechain_sidechain_interaction())
def test_is_beta_beta_interaction(self):
"""Test Restraint.is_beta_beta_interaction()"""
def modify_atoms(atoms, rind):
for a, r in zip(atoms, rind):
a.a.residue.index = r
beta_structure = {4: True}
# beta-beta interaction
r = self.make_gaussian_restraint(modify_atoms, [4, 4])
self.assertTrue(r.is_beta_beta_interaction(beta_structure))
# beta-nonbeta interaction
r = self.make_gaussian_restraint(modify_atoms, [4, 2])
self.assertFalse(r.is_beta_beta_interaction(beta_structure))
def test_is_intra_protein_interaction(self):
"""Test Restraint.is_intra_protein_interaction()"""
def modify_atoms(atoms, nuc):
for a, n in zip(atoms, nuc):
a.isNUC = n
# protein-protein interaction
r = self.make_gaussian_restraint(modify_atoms, [False, False])
self.assertTrue(r.is_intra_protein_interaction())
# protein-nucleotide interaction
r = self.make_gaussian_restraint(modify_atoms, [False, True])
self.assertFalse(r.is_intra_protein_interaction())
def test_is_intra_dna_interaction(self):
"""Test Restraint.is_intra_dna_interaction()"""
def modify_atoms(atoms, nuc_r):
for a, n in zip(atoms, nuc_r):
a.isNUC, a.torestr = n
# restrNUC-restrNUC interaction
r = self.make_gaussian_restraint(modify_atoms, [[True, True],
[True, True]])
self.assertTrue(r.is_intra_dna_interaction())
# restrNUC-NUC interaction
r = self.make_gaussian_restraint(modify_atoms, [[True, True],
[True, False]])
self.assertFalse(r.is_intra_dna_interaction())
# restrNUC-protein interaction
r = self.make_gaussian_restraint(modify_atoms, [[True, True],
[False, False]])
self.assertFalse(r.is_intra_dna_interaction())
def test_is_protein_dna_interaction(self):
"""Test Restraint.is_protein_dna_interaction()"""
def modify_atoms(atoms, nuc_r):
for a, n in zip(atoms, nuc_r):
a.isNUC, a.torestr = n
# protein-restrNUC interaction
r = self.make_gaussian_restraint(modify_atoms, [[False, False],
[True, True]])
self.assertTrue(r.is_protein_dna_interaction())
# protein-unrestrNUC interaction
r = self.make_gaussian_restraint(modify_atoms, [[False, False],
[True, False]])
self.assertFalse(r.is_protein_dna_interaction())
# protein-protein interaction
r = self.make_gaussian_restraint(modify_atoms, [[False, False],
[False, False]])
self.assertFalse(r.is_protein_dna_interaction())
# restrNUC-restrNUC interaction
r = self.make_gaussian_restraint(modify_atoms, [[True, True],
[True, True]])
self.assertFalse(r.is_protein_dna_interaction())
def test_is_allosteric_interaction(self):
"""Test Restraint.is_allosteric_interaction()"""
def modify_atoms(atoms, allos):
for a, n in zip(atoms, allos):
a.isAS = n
# AS-AS interaction
r = self.make_gaussian_restraint(modify_atoms, [True, True])
self.assertTrue(r.is_allosteric_interaction())
# AS-RS interaction
r = self.make_gaussian_restraint(modify_atoms, [True, False])
self.assertFalse(r.is_allosteric_interaction())
# RS-RS interaction
r = self.make_gaussian_restraint(modify_atoms, [False, False])
self.assertFalse(r.is_allosteric_interaction())
def test_gaussian_restraint(self):
"""Test GaussianRestraint class"""
from allosmod.edit_restraints import GaussianRestraint
from allosmod.edit_restraints import TruncatedGaussianParameters
class Atom(object):
def __init__(self, ind):
self.index = ind
self.a = self
r = GaussianRestraint("R 3 1 9 12 2 2 1 3 2 10.00 20.00",
[Atom(i) for i in range(1, 10)])
self.assertEqual([a.a.index for a in r.atoms], [3, 2])
self.assertEqual(r.modal, 1)
self.assertEqual(r.feat, 9)
self.assertEqual(r.group, 12)
self.assertAlmostEqual(r.mean, 10.0, places=1)
self.assertAlmostEqual(r.firstmean, 10.0, places=1)
self.assertAlmostEqual(r.stdev, 20.0, places=1)
self.assertTrue(r.any_mean_below(15.0))
self.assertFalse(r.any_mean_below(5.0))
self.assertFalse(r.any_mean_below(10.0))
s = StringIO()
r.write(s)
self.assertEqual(s.getvalue(), 'R 3 1 9 12 2 2 1 '
'3 2 10.0000 20.0000\n')
# transform to multigaussian
s = StringIO()
r.transform(None, 2, 30.0, truncated=False, fh=s)
self.assertEqual(s.getvalue(),
'R 4 2 9 12 2 6 1 3 2 '
'0.5000 0.5000 10.0000 10.0000 '
'30.0000 30.0000\n')
# transform to truncated gaussian
s = StringIO()
tgparams = TruncatedGaussianParameters(delEmax=2.0, delEmaxNUC=3.0,
slope=4.0, scl_delx=5.0,
breaks={})
r.transform(tgparams, 2, 30.0, truncated=True, nuc=True, fh=s)
self.assertEqual(
s.getvalue(),
'R 50 2 9 12 2 9 1 3 2 '
'3.0000 4.0000 5.0000 0.5000 0.5000 '
'10.0000 10.0000 30.0000 30.0000\n')
r.rescale(4.0)
self.assertAlmostEqual(r.stdev, 5.0, places=1)
def test_multi_gaussian_restraint(self):
"""Test MultiGaussianRestraint class"""
from allosmod.edit_restraints import MultiGaussianRestraint
from allosmod.edit_restraints import TruncatedGaussianParameters
class Atom(object):
def __init__(self, ind):
self.index = ind
self.a = self
r = MultiGaussianRestraint("R 4 2 9 12 2 6 1 3 2 0.8 0.2 "
"10.00 20.00 30.00 40.00",
[Atom(i) for i in range(1, 10)])
self.assertEqual([a.a.index for a in r.atoms], [3, 2])
self.assertEqual(r.modal, 2)
self.assertEqual(r.feat, 9)
self.assertEqual(r.group, 12)
self.assertEqual(len(r.means), 2)
self.assertEqual(len(r.stdevs), 2)
self.assertAlmostEqual(r.means[0], 10.0, places=1)
self.assertAlmostEqual(r.means[1], 20.0, places=1)
self.assertAlmostEqual(r.firstmean, 10.0, places=1)
self.assertAlmostEqual(r.stdevs[0], 30.0, places=1)
self.assertAlmostEqual(r.stdevs[1], 40.0, places=1)
self.assertTrue(r.any_mean_below(15.0))
self.assertFalse(r.any_mean_below(5.0))
self.assertFalse(r.any_mean_below(10.0))
s = StringIO()
r.write(s)
self.assertEqual(
s.getvalue(),
'R 4 2 9 12 2 6 1 3 2 '
'0.8000 0.2000 10.0000 20.0000 30.0000 40.0000\n')
# transform to multigaussian
s = StringIO()
r.transform(None, 8, 70.0, truncated=False, fh=s)
self.assertEqual(s.getvalue(),
'R 4 2 9 12 2 6 1 3 2 '
'0.5000 0.5000 10.0000 20.0000 '
'70.0000 70.0000\n')
# transform to truncated gaussian
s = StringIO()
tgparams = TruncatedGaussianParameters(delEmax=2.0, delEmaxNUC=3.0,
slope=4.0, scl_delx=5.0,
breaks={})
r.transform(tgparams, 8, 70.0, truncated=True, nuc=True, fh=s)
self.assertEqual(
s.getvalue(),
'R 50 2 9 12 2 9 1 3 2 '
'3.0000 4.0000 5.0000 0.5000 0.5000 '
'10.0000 20.0000 70.0000 70.0000\n')
r.rescale(5.0)
self.assertAlmostEqual(r.stdevs[0], 6.0, places=1)
self.assertAlmostEqual(r.stdevs[1], 8.0, places=1)
def test_cosine_restraint(self):
"""Test CosineRestraint class"""
from allosmod.edit_restraints import CosineRestraint
class Atom(object):
def __init__(self, ind):
self.index = ind
self.a = self
r = CosineRestraint("R 7 2 9 12 2 2 1 3 2 2.0 4.0",
[Atom(i) for i in range(1, 10)])
self.assertEqual([a.a.index for a in r.atoms], [3, 2])
self.assertEqual(r.modal, 2)
self.assertEqual(r.feat, 9)
self.assertEqual(r.group, 12)
self.assertAlmostEqual(r.phase, 2.0, places=1)
self.assertAlmostEqual(r.force, 4.0, places=1)
s = StringIO()
r.write(s)
self.assertEqual(
s.getvalue(),
'R 7 2 9 12 2 2 1 3 '
'2 2.0000 4.0000\n')
r.rescale(2.5)
self.assertAlmostEqual(r.force, 10.0, places=1)
def test_binormal_restraint(self):
"""Test BinormalRestraint class"""
from allosmod.edit_restraints import BinormalRestraint
class Atom(object):
def __init__(self, ind):
self.index = ind
self.a = self
r = BinormalRestraint("R 9 2 9 12 2 2 1 3 2 x y z",
[Atom(i) for i in range(1, 10)])
self.assertEqual([a.a.index for a in r.atoms], [3, 2])
s = StringIO()
r.write(s)
self.assertEqual(s.getvalue(),
'R 9 2 9 12 2 2 1 '
'3 2 x y z\n')
def test_spline_restraint(self):
"""Test SplineRestraint class"""
from allosmod.edit_restraints import SplineRestraint
class Atom(object):
def __init__(self, ind):
self.index = ind
self.a = self
r = SplineRestraint("R 10 2 9 12 2 2 1 3 2 x y z",
[Atom(i) for i in range(1, 10)])
self.assertEqual([a.a.index for a in r.atoms], [3, 2])
s = StringIO()
r.write(s)
self.assertEqual(s.getvalue(),
'R 10 2 9 12 2 2 1 '
'3 2 x y z\n')
def test_restraint_filters(self):
"""Test restraint filters"""
from allosmod.edit_restraints import filter_rs_rs, filter_not_rs_rs
class Atom(object):
def __init__(self, isAS):
self.isAS = isAS
as_as = (Atom(True), Atom(True))
as_rs = (Atom(True), Atom(False))
rs_as = (Atom(False), Atom(True))
rs_rs = (Atom(False), Atom(False))
self.assertTrue(filter_rs_rs(rs_rs))
self.assertFalse(filter_rs_rs(rs_as))
self.assertFalse(filter_rs_rs(as_rs))
self.assertFalse(filter_rs_rs(as_as))
self.assertFalse(filter_not_rs_rs(rs_rs))
self.assertTrue(filter_not_rs_rs(rs_as))
self.assertTrue(filter_not_rs_rs(as_rs))
self.assertTrue(filter_not_rs_rs(as_as))
def test_add_ca_boundary_restraints(self):
"""Test add_ca_boundary_restraints()"""
from allosmod.edit_restraints import add_ca_boundary_restraints
class Atom(object):
def __init__(self, ind, isCA):
self.isCA = isCA
self.index = ind
self.a = self
atoms = [Atom(5, False), Atom(7, True)]
s = StringIO()
add_ca_boundary_restraints(atoms, s)
restraints = s.getvalue().rstrip('\n').split('\n')
self.assertEqual(len(restraints), 6)
# Only atom #7 should be restrained
for r in restraints:
self.assertEqual(r[34:37], " 7 ")
def test_parse_restraints_file(self):
"""Test parse_restraints_file()"""
from allosmod.edit_restraints import parse_restraints_file
from allosmod.edit_restraints import filter_rs_rs
class Atom(object):
def __init__(self, ind, isAS):
self.index = ind
self.a = self
self.isAS = isAS
atoms = [Atom(i, True) for i in range(1, 10)]
# no filter
s = StringIO("R 3 1 9 12 2 2 1 3 2 10.00 20.00\n\n")
rs = list(parse_restraints_file(s, atoms))
self.assertEqual(len(rs), 1)
self.assertEqual([a.a.index for a in rs[0].atoms], [3, 2])
# rs-rs filter
s = StringIO("R 3 1 9 12 2 2 1 3 2 10.00 20.00\n\n")
rs = list(parse_restraints_file(s, atoms, filter_rs_rs))
self.assertEqual(len(rs), 0)
def test_atom(self):
"""Test Atom class"""
from allosmod.edit_restraints import Atom
a = Atom('foo')
self.assertEqual(a.isAS, False)
self.assertEqual(a.isNUC, False)
self.assertEqual(a.isSC, False)
self.assertEqual(a.isCA, False)
self.assertEqual(a.isCB, False)
self.assertEqual(a.torestr, False)
self.assertEqual(a.a, 'foo')
def test_contact_map(self):
"""Test ContactMap class"""
from allosmod.edit_restraints import ContactMap, Atom
class ModellerResidue(object):
pass
class ModellerAtom(object):
def __init__(self, ind):
self.residue = ModellerResidue()
self.residue.index = ind
c = ContactMap()
self.assertEqual(list(c.keys()), [])
self.assertFalse(c[(1, 4)])
c[(1, 4)] = True
c[(5, 2)] = True
self.assertTrue(c[(1, 4)])
self.assertTrue(c[(4, 1)])
self.assertTrue(c[(2, 5)])
self.assertTrue(c[(5, 2)])
self.assertTrue(c[(Atom(ModellerAtom(5)), 2)])
self.assertTrue(c[(5, Atom(ModellerAtom(2)))])
self.assertEqual(len(c.keys()), 2)
def test_get_beta(self):
"""Test get_beta()"""
from allosmod.edit_restraints import get_beta
def mock_get_ss(pdb_file):
if pdb_file == 'empty':
return []
elif pdb_file == 'all_beta':
return ['E']*3
elif pdb_file == 'all_helix':
return ['H']*10
elif pdb_file == 'some_beta':
return ['', 'E', '']
with utils.mock_method(allosmod.get_ss, 'get_ss', mock_get_ss):
self.assertEqual(get_beta('empty'), {})
self.assertEqual(get_beta('all_beta'), {1: True, 2: True, 3: True})
self.assertEqual(get_beta('all_helix'), {})
self.assertEqual(get_beta('some_beta'), {2: True})
def test_get_nuc_restrained(self):
"""Test get_nuc_restrained()"""
from allosmod.edit_restraints import get_nuc_restrained
self.assertFalse(get_nuc_restrained('OP1', 'any residue'))
self.assertFalse(get_nuc_restrained('OP2', 'any residue'))
self.assertTrue(get_nuc_restrained("O3'", 'any residue'))
self.assertTrue(get_nuc_restrained('N1', 'ADE'))
self.assertTrue(get_nuc_restrained('C2', 'DT'))
self.assertTrue(get_nuc_restrained('O2', 'U'))
self.assertTrue(get_nuc_restrained('O6', 'G'))
self.assertTrue(get_nuc_restrained('N1', 'CYT'))
self.assertFalse(get_nuc_restrained('N2', 'CYT'))
self.assertFalse(get_nuc_restrained('N2', 'URA'))
def test_parse_other(self):
"""Test parse of restraints unknown to AllosMod"""
class MockRestraint(object):
atoms = []
def is_intrahet(self):
return False
e = MockRestraintEditor()
r2 = list(e.check_parse_restraint(MockRestraint()))
# Unknown restraints are ignored
self.assertEqual(len(r2), 0)
def test_parse_coarse_ca_ca_intra_protein(self):
"""Test parse of coarse AS-AS CA-CA intra-protein restraint"""
e = MockRestraintEditor()
e.coarse = True
e.contacts[(1, 2)] = True # non-local interaction
def modify_atoms(atoms, arg):
atoms[0].isAS = atoms[1].isAS = True # AS-AS
atoms[0].isCA = atoms[1].isCA = True # CA-CA
atoms[0].a.residue.index = 1
atoms[1].a.residue.index = 2
r = self.make_gaussian_restraint(modify_atoms)
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]), TruncatedGaussianRestraint)
self.assertAlmostEqual(r2[0].delE, 10.0, places=1)
self.assertAlmostEqual(r2[0].slope, 3.0, places=1)
self.assertAlmostEqual(r2[0].scl_delx, 4.0, places=1)
self.assertEqual(len(r2[0].stdevs), 2)
self.assertAlmostEqual(r2[0].stdevs[0], 8.0, places=1)
# with tgauss_AS off
e.tgauss_AS = False
r = self.make_gaussian_restraint(modify_atoms)
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]),
allosmod.edit_restraints.GaussianRestraint)
self.assertAlmostEqual(r2[0].stdev, 8.0, places=1)
# with empty_AS on
e.tgauss_AS = True
e.empty_AS = True
r = self.make_gaussian_restraint(modify_atoms)
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 0)
def test_parse_ca_ca_multi_intra_protein(self):
"""Test parse of CA-CA multigauss intra-protein restraint"""
e = MockRestraintEditor()
e.contacts[(1, 2)] = True # non-local interaction
def modify_atoms(atoms, arg):
atoms[0].isAS = atoms[1].isAS = True # AS-AS
atoms[0].isCA = atoms[1].isCA = True # CA-CA
atoms[0].a.residue.index = 1
atoms[1].a.residue.index = 2
r = self.make_multi_gaussian_restraint(modify_atoms)
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]), TruncatedGaussianRestraint)
self.assertAlmostEqual(r2[0].delE, 10.0, places=1)
self.assertAlmostEqual(r2[0].slope, 3.0, places=1)
self.assertAlmostEqual(r2[0].scl_delx, 4.0, places=1)
self.assertEqual(len(r2[0].stdevs), 2)
self.assertAlmostEqual(r2[0].stdevs[0], 2.0, places=1)
# with delEmax = 0
r2 = list(e.check_parse_restraint(r, delEmax=0.))
self.assertEqual(type(r2[0]),
allosmod.edit_restraints.MultiGaussianRestraint)
self.assertEqual(len(r2[0].stdevs), 2)
self.assertAlmostEqual(r2[0].stdevs[0], 2.0, places=1)
def test_parse_rs_ca_ca_intra_protein(self):
"""Test parse of RS-RS CA-CA intra-protein restraint"""
e = MockRestraintEditor()
e.coarse = True
e.contacts[(1, 2)] = True # non-local interaction
def modify_atoms(atoms, arg):
atoms[0].isAS = atoms[1].isAS = False # RS-RS
atoms[0].isCA = atoms[1].isCA = True # CA-CA
atoms[0].a.residue.index = 1
atoms[1].a.residue.index = 2
r = self.make_gaussian_restraint(modify_atoms)
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]), TruncatedGaussianRestraint)
self.assertAlmostEqual(r2[0].delE, 10.0, places=1)
self.assertAlmostEqual(r2[0].slope, 3.0, places=1)
self.assertAlmostEqual(r2[0].scl_delx, 4.0, places=1)
self.assertEqual(len(r2[0].stdevs), 2)
self.assertAlmostEqual(r2[0].stdevs[0], 12.0, places=1)
# with delEmax = 0
r2 = list(e.check_parse_restraint(r, delEmax=0.))
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]),
allosmod.edit_restraints.MultiGaussianRestraint)
self.assertEqual(len(r2[0].stdevs), 1)
self.assertAlmostEqual(r2[0].stdevs[0], 12.0, places=1)
def test_parse_bond_restraint(self):
"""Test parse of bond restraint"""
e = MockRestraintEditor()
def modify_atoms(atoms, arg):
for a, h in zip(atoms, arg):
a.a.residue.hetatm = h
for het, scale in (False, 1.0), (True, 4.0):
r = self.make_gaussian_restraint(modify_atoms, [het, het])
r.group = 1
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]),
allosmod.edit_restraints.GaussianRestraint)
self.assertAlmostEqual(r2[0].mean, 10.0, places=1)
self.assertAlmostEqual(r2[0].stdev, 20.0 / scale, places=1)
def test_parse_angle_restraint(self):
"""Test parse of angle/dihedral restraint"""
e = MockRestraintEditor()
def modify_atoms(atoms, arg):
for a, h in zip(atoms, arg):
a.a.residue.hetatm = h
for natom in (3, 4):
for het, scale in (False, 1.0), (True, 4.0):
r = self.make_gaussian_restraint(modify_atoms, [het]*natom,
natom=natom)
r.group = 1
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]),
allosmod.edit_restraints.GaussianRestraint)
self.assertAlmostEqual(r2[0].mean, 10.0, places=1)
self.assertAlmostEqual(r2[0].stdev, 20.0 / scale, places=1)
def test_parse_multi_angle_restraint(self):
"""Test parse of multigauss angle/dihedral restraint"""
e = MockRestraintEditor()
def modify_atoms(atoms, arg):
for a, h in zip(atoms, arg):
a.a.residue.hetatm = h
for natom in (3, 4):
for het, scale in (False, 1.0), (True, 4.0):
r = self.make_multi_gaussian_restraint(
modify_atoms, [het]*natom, natom=natom)
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 1)
self.assertEqual(
type(r2[0]),
allosmod.edit_restraints.MultiGaussianRestraint)
self.assertEqual(len(r2[0].means), 2)
self.assertAlmostEqual(r2[0].weights[0], 0.8, places=1)
self.assertAlmostEqual(r2[0].weights[1], 0.2, places=1)
self.assertAlmostEqual(r2[0].means[0], 10.0, places=1)
self.assertAlmostEqual(r2[0].means[1], 20.0, places=1)
self.assertAlmostEqual(r2[0].stdevs[0], 5.0 / scale, places=1)
self.assertAlmostEqual(r2[0].stdevs[1], 8.0 / scale, places=1)
def test_parse_cosine_restraint(self):
"""Test parse of cosine restraint"""
e = MockRestraintEditor()
def modify_atoms(atoms, arg):
for a, h in zip(atoms, arg):
a.a.residue.hetatm = h
for het, scale in (False, 1.0), (True, 4.0):
r = self.make_cosine_restraint(modify_atoms, [het, het])
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]),
allosmod.edit_restraints.CosineRestraint)
self.assertAlmostEqual(r2[0].phase, 20.0, places=1)
self.assertAlmostEqual(r2[0].force, 30.0 * scale, places=1)
def test_parse_spline_restraint(self):
"""Test parse of spline restraint"""
# should pass through as-is
e = MockRestraintEditor()
r = self.make_spline_restraint()
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]),
allosmod.edit_restraints.SplineRestraint)
self.assertEqual(r2[0]._params, ['x', 'y', 'z'])
def test_parse_coarse_not_ca_cb(self):
"""Test parse of coarse restraint, not CA-CB"""
e = MockRestraintEditor()
e.coarse = True
def modify_atoms(atoms, arg):
atoms[0].a.residue.index = 1
atoms[1].a.residue.index = 2
r = self.make_gaussian_restraint(modify_atoms)
r2 = list(e.check_parse_restraint(r))
# Restraint should be omitted
self.assertEqual(len(r2), 0)
def test_parse_sidechain_too_long(self):
"""Test parse of sidechain-sidechain restraint, mean too big"""
e = MockRestraintEditor()
def modify_atoms(atoms, arg):
atoms[0].isSC = atoms[1].isSC = True
atoms[0].a.residue.index = 1
atoms[1].a.residue.index = 2
r = self.make_gaussian_restraint(modify_atoms)
r.mean = 5.1
r2 = list(e.check_parse_restraint(r))
# Restraint should be omitted
self.assertEqual(len(r2), 0)
def test_local_2to5_ca_cb(self):
"""Test locrigid CA-CB restraint with res range 2-5"""
e = MockRestraintEditor()
e.locrigid = True
def modify_atoms(atoms, seqdst):
atoms[0].isCA = atoms[1].isCA = True # CA-CA
atoms[0].a.residue.index = 30
atoms[1].a.residue.index = 30 + seqdst
for seqdst in 2, 5, -2, -5:
r = self.make_gaussian_restraint(modify_atoms, seqdst)
r2 = list(e.check_parse_restraint(r))
# sigma should be set to 2.0
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]), TruncatedGaussianRestraint)
self.assertEqual(len(r2[0].stdevs), 2)
self.assertAlmostEqual(r2[0].stdevs[0], 2.0, places=1)
self.assertAlmostEqual(r2[0].stdevs[1], 2.0, places=1)
def test_local_6to12_ca_cb(self):
"""Test locrigid CA-CB restraint with res range 6-12"""
e = MockRestraintEditor()
e.locrigid = True
def modify_atoms(atoms, seqdst):
atoms[0].isCA = atoms[1].isCA = True # CA-CA
atoms[0].a.residue.index = 30
atoms[1].a.residue.index = 30 + seqdst
for seqdst in 6, 12, -6, -12:
r = self.make_gaussian_restraint(modify_atoms, seqdst)
r.mean = 5.9
r2 = list(e.check_parse_restraint(r))
# sigma should be set to 2.0
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]), TruncatedGaussianRestraint)
self.assertEqual(len(r2[0].stdevs), 2)
self.assertAlmostEqual(r2[0].stdevs[0], 2.0, places=1)
self.assertAlmostEqual(r2[0].stdevs[1], 2.0, places=1)
# If mean >= 6.0, restraint is omitted
r = self.make_gaussian_restraint(modify_atoms, seqdst)
r.mean = 6.1
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 0)
def test_under3_ca_cb(self):
"""Test CA-CB restraint with res range <= 2"""
e = MockRestraintEditor()
def modify_atoms(atoms, arg):
atoms[0].isCA = atoms[1].isCA = True # CA-CA
atoms[0].a.residue.index = 30
atoms[1].a.residue.index = 32
r = self.make_gaussian_restraint(modify_atoms)
e.beta_structure = {30: True, 32: True}
r.mean = 5.9
r2 = list(e.check_parse_restraint(r))
# sigma should be set to 2.0
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]), TruncatedGaussianRestraint)
self.assertEqual(len(r2[0].stdevs), 2)
self.assertAlmostEqual(r2[0].stdevs[0], 2.0, places=1)
self.assertAlmostEqual(r2[0].stdevs[1], 2.0, places=1)
# If mean >= 6.0, restraint is omitted
r = self.make_gaussian_restraint(modify_atoms)
r.mean = 6.1
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 0)
# If not beta structure, restraint is omitted
r = self.make_gaussian_restraint(modify_atoms)
r.mean = 5.9
e.beta_structure = {}
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 0)
def test_parse_het_het(self):
"""Test parse of het-het restraint"""
e = MockRestraintEditor()
def modify_atoms(atoms, arg):
atoms[0].a.residue.hetatm = True
atoms[1].a.residue.hetatm = True
r = self.make_gaussian_restraint(modify_atoms)
r2 = list(e.check_parse_restraint(r))
# keep as is (but scaled by HETscale, 4.0)
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]),
allosmod.edit_restraints.GaussianRestraint)
self.assertAlmostEqual(r2[0].mean, 10.0, places=1)
self.assertAlmostEqual(r2[0].stdev, 20.0 / 4.0, places=1)
def test_protein_dna_restraint(self):
"""Test parse of protein-dna restraint"""
e = MockRestraintEditor()
e.contacts[(1, 2)] = True # non-local interaction
def modify_atoms(atoms, arg):
atoms[1].isNUC = atoms[1].torestr = True # protein-DNA, RS-RS
atoms[0].a.residue.index = 1
atoms[1].a.residue.index = 2
r = self.make_gaussian_restraint(modify_atoms)
r.mean = 7.9
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]), TruncatedGaussianRestraint)
self.assertAlmostEqual(r2[0].delE, 20.0, places=1) # delEmaxNUC
self.assertEqual(len(r2[0].stdevs), 2)
self.assertAlmostEqual(r2[0].stdevs[0], 3.0, places=1)
self.assertAlmostEqual(r2[0].stdevs[1], 3.0, places=1)
# If mean > rcutNUC (8.0), restraint should be omitted
r.mean = 8.1
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 0)
def test_intra_dna_restraint(self):
"""Test parse of intra-dna restraint"""
e = MockRestraintEditor()
e.contacts[(1, 2)] = True # non-local interaction
def modify_atoms(atoms, arg):
atoms[0].isNUC = atoms[0].torestr = True # DNA-DNA, RS-RS
atoms[1].isNUC = atoms[1].torestr = True
atoms[0].a.residue.index = 1
atoms[1].a.residue.index = 2
r = self.make_gaussian_restraint(modify_atoms)
r.mean = 7.9
# stdev should be forced to 1.0, mean not changed
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 1)
self.assertEqual(type(r2[0]),
allosmod.edit_restraints.GaussianRestraint)
self.assertAlmostEqual(r2[0].mean, 7.9, places=1)
self.assertAlmostEqual(r2[0].stdev, 1.0, places=1)
# If mean > rcutNUC (8.0), restraint should be omitted
r.mean = 8.1
r2 = list(e.check_parse_restraint(r))
self.assertEqual(len(r2), 0)
def test_setup_delEmax_no_coarse(self):
"""Test setup_delEmax(), coarse=False"""
e = MockRestraintEditor()
# coarse=False; delE* should be unchanged
e.setup_delEmax()
self.assertAlmostEqual(e.delEmax, 0.2, places=1)
self.assertAlmostEqual(e.delEmaxNUC, 0.12, places=2)
def test_setup_delEmax_no_rsr(self):
"""Test setup_delEmax(), no restraints"""
e = MockRestraintEditor()
# empty file; delE* should be unchanged
e.coarse = True
e.atoms = []
open('dummyas.rsr', 'w').close()
def mock_parse(fh, atoms):
return []
with utils.mock_method(allosmod.edit_restraints,
'parse_restraints_file', mock_parse):
e.setup_delEmax()
self.assertAlmostEqual(e.delEmax, 0.2, places=1)
self.assertAlmostEqual(e.delEmaxNUC, 0.12, places=2)
os.unlink('dummyas.rsr')
def test_setup_delEmax_rsr(self):
"""Test setup_delEmax(), with some restraints"""
e = MockRestraintEditor()
e.contacts[(1, 2)] = True
e.coarse = True
e.atoms = []
open('dummyas.rsr', 'w').close()
def mock_parse(fh, atoms):
def set_resind(atoms):
atoms[0].a.residue.index = 1
atoms[1].a.residue.index = 2
def make_ca_ca(atoms, arg):
set_resind(atoms)
atoms[0].isCA = atoms[1].isCA = True # CA-CA
def make_cb_cb(atoms, arg):
set_resind(atoms)
atoms[0].isCB = atoms[1].isCB = True # CB-CB
def make_sc_sc(atoms, arg):
set_resind(atoms)
atoms[0].isSC = atoms[1].isSC = True # SC-SC
# CA-CA restraint
r = self.make_gaussian_restraint(make_ca_ca)
yield r
# CB-CB restraint
r = self.make_gaussian_restraint(make_cb_cb)
yield r
# SC-SC restraint, but short (<distco_scsc)
r = self.make_gaussian_restraint(make_sc_sc)
r.mean = 4.9
yield r
# Restraint with natoms!=2
r = self.make_gaussian_restraint(make_ca_ca, natom=3)
yield r
# Restraint not in contacts
r = self.make_gaussian_restraint(make_ca_ca)
r.atoms[0].a.residue.index = 3
yield r
# Restraint of wrong type
r = self.make_cosine_restraint(make_ca_ca)
yield r
with utils.mock_method(allosmod.edit_restraints,
'parse_restraints_file', mock_parse):
e.setup_delEmax()
self.assertAlmostEqual(e.delEmax, 0.08, places=2)
self.assertAlmostEqual(e.delEmaxNUC, 0.05, places=2)
os.unlink('dummyas.rsr')
def test_simple(self):
"""Simple complete run of edit_restraints"""
with utils.temporary_directory() as tmpdir:
with open(os.path.join(tmpdir, 'pm_test.pdb'), 'w') as fh:
fh.write("""
ATOM 1 N ARG A 1 -18.387 -9.167 -1.701 1.00 0.54 N
ATOM 2 CA ARG A 1 -17.434 -9.856 -0.787 1.00 0.45 C
ATOM 3 C ARG A 1 -15.998 -9.610 -1.251 1.00 0.45 C
ATOM 4 O ARG A 1 -15.130 -10.444 -1.087 1.00 0.52 O
ATOM 5 CB ARG A 1 -17.793 -11.337 -0.899 1.00 0.46 C
""")
with open(os.path.join(tmpdir, 'test.rsr'), 'w') as fh:
fh.write("R 3 1 9 12 2 2 1 1 2 10.00 20.00\n")
with open(os.path.join(tmpdir, 'list4contacts'), 'w') as fh:
fh.write("test.pdb\n")
with open(os.path.join(tmpdir, 'atomlistASRS'), 'w') as fh:
fh.write("1 AS\n2 AS\n")
check_output(['allosmod', 'edit_restraints', 'test.rsr',
'test.rsr', 'pm_test.pdb', 'list4contacts',
'atomlistASRS'], cwd=tmpdir)
if __name__ == '__main__':
unittest.main()
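# ---------------------------------------------------------------------------
# A hypothetical sketch (not the real allosmod implementation) of the
# symmetric ContactMap behaviour exercised by test_contact_map above:
# keys are unordered residue-index pairs, and Atom-like wrappers are
# reduced to their residue index before lookup.
class SymmetricContactMap(dict):
    @staticmethod
    def _key(pair):
        def index(x):
            # accept either a bare residue index or an Atom-like wrapper
            return x if isinstance(x, int) else x.a.residue.index
        i, j = index(pair[0]), index(pair[1])
        return (i, j) if i <= j else (j, i)

    def __getitem__(self, pair):
        # absent pairs read as False, matching the tested behaviour
        return dict.get(self, self._key(pair), False)

    def __setitem__(self, pair, value):
        dict.__setitem__(self, self._key(pair), value)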
|
salilab/allosmod-lib
|
test/test_edit_restraints.py
|
Python
|
lgpl-2.1
| 49,570
|
[
"Gaussian"
] |
9255d8890e8ca8a79a213823c8d4c9154f5abd5e86f83bfbeb036f087606a8fb
|
# -*- coding: utf-8 -*-
#
# define_hill_tononi.py
#
# This file is part of the NEST Instrumentation App.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST Instrumentation App is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST Instrumentation App is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST Instrumentation App. If not, see <http://www.gnu.org/licenses/>.
"""
Definition of partial Hill-Tononi (2005) Model.
This module provides layer and projections declarations suitable for
use with the NEST Topology Module.
The file defines a Hill-Tononi model variant limited to the primary pathway.
"""
from copy import deepcopy
import numpy as np
params = {
'Np': 40, # Number of rows and columns in primary nodes
'visSize': 8.0, # Extent of the layer
'ret_rate': 45.0, # Rate in the retina nodes
'ret_amplitude': 45.0, # Amplitude in the retina nodes
'temporal_frequency': 2.0, # Frequency of the retina nodes (Hz)
'lambda_dg': 2.0, # wavelength of drifting grating
'phi_dg': 0.0 # normal direction of grating (degrees)
}
def modified_copy(orig, diff):
"""
Returns a deep copy of dictionary with changes applied.
@param orig original dictionary, will be deep-copied
@param diff copy will be updated with this dict
"""
tmp = deepcopy(orig)
tmp.update(diff)
return tmp
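# Example (values are illustrative only): deriving a parameter set from a
# common base without mutating the original dictionary.
#   base = {'tau_m': 16.0, 'g_peak_h': 1.0}
#   modified_copy(base, {'tau_m': 8.0}) -> {'tau_m': 8.0, 'g_peak_h': 1.0}
#   base is left unchanged: {'tau_m': 16.0, 'g_peak_h': 1.0}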
def make_layers():
"""Build list of layers and models for HT Model."""
nrnmod = 'ht_neuron'
# Default parameter values in ht_neuron are for excitatory
# cortical cells. For inhibitory and thalamic cells, we
# have modified parameters from [1], Tables 2, 3.
# To model absence of various intrinsic currents, we set their
# peak conductance to zero. By default, all intrinsic currents
# are active.
#
# g_KL is set to 1.0, the value for the awake state.
# No I_T, I_h in cortical excitatory cells
ctxExPars = {'g_peak_T': 0.0,
'g_peak_h': 0.0}
# But L56 has I_h
ctxExL56Pars = {'g_peak_T': 0.0,
'g_peak_h': 1.0}
# 'spike_duration': 1.0
# No I_T, I_h in cortical inhibitory cells
ctxInPars = {'tau_m': 8.0,
'theta_eq': -53.0,
'tau_theta': 1.0,
'tau_spike': 0.5,
'g_peak_T': 0.0,
'g_peak_h': 0.0}
# Thalamic neurons have no I_KNa
thalPars = {'tau_m': 8.0,
'theta_eq': -53.0,
'tau_theta': 0.75,
'tau_spike': 0.75,
'E_rev_GABA_A': -80.0,
'g_peak_KNa': 0.0}
# Reticular neurons have no I_KNa, I_h
# We assume that the "thalamic" line of Table 2 applies to
# reticular neurons as well.
reticPars = {'tau_m': 8.0,
'theta_eq': -53.0,
'tau_theta': 0.75,
'tau_spike': 0.75,
'g_peak_KNa': 0.0,
'g_peak_h': 0.0}
models = [(nrnmod, 'Relay', thalPars),
(nrnmod, 'Inter', thalPars),
(nrnmod, 'RpNeuron', reticPars)]
# Build lists of cortical models using list comprehension.
models += [(nrnmod, layer + 'pyr', ctxExPars) for layer in ('L23', 'L4')]
models += [(nrnmod, layer + 'pyr', ctxExL56Pars) for layer in ('L56',)]
models += [(nrnmod, layer + 'in', ctxInPars)
for layer in ('L23', 'L4', 'L56')]
# Add synapse models, which differ only in receptor type.
# The mapping of receptor names to receptor indices would normally be
# obtained from the ht_neuron model itself; it is hard-coded here so that
# this module does not depend on a running NEST installation.
ht_rc = {u'AMPA': 1, u'GABA_A': 3, u'GABA_B': 4, u'NMDA': 2}
syn_models = [('static_synapse', syn, {'receptor_type': ht_rc[syn]})
for syn in ('AMPA', 'NMDA', 'GABA_A', 'GABA_B')]
# now layers, primary and secondary pathways
layerPropsP = {'rows': params['Np'],
'columns': params['Np'],
'extent': [params['visSize'], params['visSize']],
# 'center': [3, -1], # For testing purposes
'edge_wrap': True}
layers = [('Tp', modified_copy(layerPropsP, {'elements': ['Relay', 'Inter']})),
('Rp', modified_copy(layerPropsP, {'elements': 'RpNeuron'})),
('Vp_h', modified_copy(layerPropsP, {'elements':
['L23pyr', 2, 'L23in', 1,
'L4pyr', 2, 'L4in', 1,
'L56pyr', 2, 'L56in', 1]})),
('Vp_v', modified_copy(layerPropsP, {'elements':
['L23pyr', 2, 'L23in', 1,
'L4pyr', 2, 'L4in', 1,
'L56pyr', 2, 'L56in', 1]}))]
return layers, models, syn_models
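# A sketch of how these declarations would typically be consumed with the
# classic NEST 2.x Topology API. The exact calls depend on the NEST version
# installed; this is illustrative only and not part of the model definition.
#
#   import nest
#   import nest.topology as topo
#   layers, models, syn_models = make_layers()
#   for base, name, pars in models:
#       nest.CopyModel(base, name, pars)
#   for base, name, pars in syn_models:
#       nest.CopyModel(base, name, pars)
#   nets = {name: topo.CreateLayer(props) for name, props in layers}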
def make_connections():
"""
Return list of dictionaries specifying connectivity.
NOTE: Connectivity is modified from Hill-Tononi for simplicity.
"""
# scaling parameters from grid elements to visual angle
dpcP = params['visSize'] / (params['Np'] - 1)
# ---------- PRIMARY PATHWAY ------------------------------------
ccConnections = []
ccxConnections = []
ctConnections = []
horIntraBase = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"circular": {"radius": 12.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 0.05, "sigma": 7.5 * dpcP}},
"weights": 1.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for conn in [{"sources": {"model": "L23pyr"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L23pyr"}, 'synapse_model': 'NMDA'},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L23in" }},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L4pyr" },
"mask" : {"circular": {"radius": 7.0 * dpcP}}},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L4in" },
"mask" : {"circular": {"radius": 7.0 * dpcP}}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L56pyr" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L56in" }}]:
ndict = horIntraBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
verIntraBase = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"circular": {"radius": 2.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 1.0, "sigma": 7.5 * dpcP}},
"weights": 2.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for conn in [{"sources": {"model": "L23pyr"}, "targets": {"model": "L56pyr"}, "weights": 1.0},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L56pyr"}, "weights": 1.0, 'synapse_model': 'NMDA'},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L56in" }, "weights": 1.0},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L23in" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L23in" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L4pyr" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L4in" }}]:
ndict = verIntraBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
intraInhBase = {"connection_type": "divergent",
"synapse_model": "GABA_A",
"mask": {"circular": {"radius": 7.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 0.25, "sigma": 7.5 * dpcP}},
"weights": 1.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for conn in [{"sources": {"model": "L23in"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L23in"}, "targets": {"model": "L23in" }},
{"sources": {"model": "L4in" }, "targets": {"model": "L4pyr" }},
{"sources": {"model": "L4in" }, "targets": {"model": "L4in" }},
{"sources": {"model": "L56in"}, "targets": {"model": "L56pyr"}},
{"sources": {"model": "L56in"}, "targets": {"model": "L56in" }}]:
ndict = intraInhBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
ccxConnections.append(ndict)
intraInhBaseB = {"connection_type": "divergent",
"synapse_model": "GABA_B",
"mask": {"circular": {"radius": 1.0 * dpcP}},
"kernel": 0.3,
"weights": 1.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for conn in [{"sources": {"model": "L23in"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L4in" }, "targets": {"model": "L4pyr" }},
{"sources": {"model": "L56in"}, "targets": {"model": "L56pyr" }}]:
ndict = intraInhBaseB.copy()
ndict.update(conn)
ccConnections.append(ndict)
ccxConnections.append(ndict)
corThalBase = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"circular": {"radius": 5.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 0.5, "sigma": 7.5 * dpcP}},
"weights": 1.0,
"delays": {"uniform": {"min": 7.5, "max": 8.5}}}
for conn in [{"sources": {"model": "L56pyr"}, "targets": {"model": "Relay" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "Inter" }}]:
ndict = corThalBase.copy()
ndict.update(conn)
ctConnections.append(ndict)
corRet = corThalBase.copy()
corRet.update({"sources": {"model": "L56pyr"}, "targets": {"model": "RpNeuron"}, "weights": 2.5})
# Build the complete list of connections between named populations.
allconns = []
    #! Cortico-cortical, same orientation
    allconns.extend(['Vp_h', 'Vp_h', c] for c in ccConnections)
    allconns.extend(['Vp_v', 'Vp_v', c] for c in ccConnections)
    #! Cortico-cortical, cross-orientation
    allconns.extend(['Vp_h', 'Vp_v', c] for c in ccxConnections)
    allconns.extend(['Vp_v', 'Vp_h', c] for c in ccxConnections)
    #! Cortico-thalamic connections
    allconns.extend(['Vp_h', 'Tp', c] for c in ctConnections)
    allconns.extend(['Vp_v', 'Tp', c] for c in ctConnections)
    allconns.append(['Vp_h', 'Rp', corRet])
    allconns.append(['Vp_v', 'Rp', corRet])
#! Thalamo-cortical connections
thalCorRect = {"connection_type": "convergent",
"sources": {"model": "Relay"},
"synapse_model": "AMPA",
"weights": 5.0,
"delays": {"uniform": {"min": 2.75, "max": 3.25}}}
#! Horizontally tuned
thalCorRect.update({"mask": {"rectangular": {"lower_left" : [-4.05*dpcP, -1.05*dpcP],
"upper_right": [ 4.05*dpcP, 1.05*dpcP]}}})
for conn in [{"targets": {"model": "L4pyr" }, "kernel": 0.5},
{"targets": {"model": "L56pyr"}, "kernel": 0.3}]:
thalCorRect.update(conn)
allconns.append(['Tp','Vp_h', thalCorRect.copy()])
#! Vertically tuned
thalCorRect.update({"mask": {"rectangular": {"lower_left" : [-1.05*dpcP, -4.05*dpcP],
"upper_right": [ 1.05*dpcP, 4.05*dpcP]}}})
for conn in [{"targets": {"model": "L4pyr" }, "kernel": 0.5},
{"targets": {"model": "L56pyr"}, "kernel": 0.3}]:
thalCorRect.update(conn)
allconns.append(['Tp','Vp_v', thalCorRect.copy()])
#! Diffuse connections
thalCorDiff = {"connection_type": "divergent",
"sources": {"model": "Relay"},
"synapse_model": "AMPA",
"weights": 5.0,
"mask": {"circular": {"radius": 5.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 0.1, "sigma": 7.5 * dpcP}},
"delays": {"uniform": {"min": 2.75, "max": 3.25}}}
for conn in [{"targets": {"model": "L4in" }},
{"targets": {"model": "L56in"}}]:
thalCorDiff.update(conn)
allconns.append(['Tp','Vp_h', thalCorDiff.copy()])
allconns.append(['Tp','Vp_v', thalCorDiff.copy()])
#! Thalamic connections
thalBase = {"connection_type": "divergent",
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for src, tgt, conn in [('Tp', 'Rp', {"sources": {"model": "Relay"},
"synapse_model": "AMPA",
"mask": {"circular": {"radius": 2.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 1.0, "sigma": 7.5 * dpcP}},
"weights": 2.0}),
('Tp', 'Tp', {"sources": {"model": "Inter"},
"targets": {"model": "Relay"}, "synapse_model": "GABA_A",
"mask": {"circular": {"radius": 2.0 * dpcP}},
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.25, "sigma": 7.5 * dpcP}}}),
('Tp', 'Tp', {"sources": {"model": "Inter"},
"targets": {"model": "Inter"}, "synapse_model": "GABA_A",
"mask": {"circular": {"radius": 2.0 * dpcP}},
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.25, "sigma": 7.5 * dpcP}}}),
('Rp', 'Tp', {"targets": {"model": "Relay"},
"mask": {"circular": {"radius": 12.0 * dpcP}}, "synapse_model": "GABA_A",
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.15, "sigma": 7.5 * dpcP}}}),
('Rp', 'Tp', {"targets": {"model": "Relay"},
"mask": {"circular": {"radius": 12.0 * dpcP}}, "synapse_model": "GABA_B",
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.05, "sigma": 7.5 * dpcP}}}),
('Rp', 'Tp', {"targets": {"model": "Inter"},
"mask": {"circular": {"radius": 12.0 * dpcP}}, "synapse_model": "GABA_A",
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.15, "sigma": 7.5 * dpcP}}}),
('Rp', 'Tp', {"targets": {"model": "Inter"},
"mask": {"circular": {"radius": 12.0 * dpcP}}, "synapse_model": "GABA_B",
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.05, "sigma": 7.5 * dpcP}}}),
('Rp', 'Rp', {"mask": {"circular": {"radius": 12.0 * dpcP}}, "synapse_model": "GABA_B",
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.5, "sigma": 7.5 * dpcP}}})]:
thal = thalBase.copy()
thal.update(conn)
allconns.append([src,tgt,thal])
# Now fix Gaussians
for conn in allconns:
cdict = conn[2]
kern = cdict["kernel"]
if isinstance(kern, dict) and "gaussian" in kern:
assert(cdict["connection_type"] == "divergent")
# find correct spatial-to-grid factor, depends on target (no Gaussian convergent conns.)
lam = dpcS if conn[1][:2] in ('Ts', 'Rs', 'Vs') else dpcP
# get mask size, assume here all are circular, radius is r * lam
assert("circular" in cdict["mask"])
r = cdict["mask"]["circular"]["radius"] / lam
# get current sigma, which is w * lam
sig = kern["gaussian"]["sigma"]
# compute new sigma
nsig = (2*r+1)*lam/(2*np.pi)*np.sqrt(0.5*sig/lam)
# set new sigma
kern["gaussian"]["sigma"] = nsig
# print '%10.2f -> %10.2f (lam = %10.2f)' % (sig, nsig, lam)
# Now fix masks
for conn in allconns:
cdict = conn[2]
mask = cdict["mask"]
if "circular" in mask:
# find correct spatial-to-grid factor
if cdict["connection_type"] == "divergent":
lam = dpcS if conn[1][:2] in ('Ts', 'Rs', 'Vs') else dpcP
else: # convergent, look at source
lam = dpcS if conn[0][:2] in ('Ts', 'Rs', 'Vs') else dpcP
# radius in grid units
r = mask["circular"]["radius"] / lam
# corner dislocation from center for edge length 2r+1, in spatial units
d = 0.5 * (2*r+1) * lam
# new mask
cdict["mask"]={'rectangular': {'lower_left': [-d, -d], 'upper_right': [d, d]}}
return allconns
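# Companion sketch to example_build_network() above (same NEST 2.x topology
# assumption): apply the connection list produced by make_connections().
def example_connect_all(layer_dict, allconns):
    import nest.topology as topo
    for src, tgt, conn in allconns:
        topo.ConnectLayers(layer_dict[src], layer_dict[tgt], conn)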
def presim_setup(nest_layers, **kwargs):
"""
    Hook called by the App before simulating; may perform additional setup.
    Currently a no-op.
"""
pass
| compneuronmbu/NESTConnectionApp | static/examples/define_hill_tononi.py | Python | gpl-2.0 | 18,392 | ["Gaussian"] | 0d9dfefb6c1b59cfd0b42614302945816d71d45f3aab98457cf33da3dfe82bb5 |
# Copyright (C) 2004-2008 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
## @file view2DMeshExample.py
"""
Example of loading a two-dimensional mesh into pyvisi and viewing it
Will hopefully help me write a decent interface.
"""
import sys,os
use_vtk = True  # toggle between the plain-vtk and the pyvisi code paths
if not use_vtk:
    # append the parent directory so that the script can be run from the
    # examples directory
    sys.path.append('../')
# import the python visualisation interface
from pyvisi import *
# import vtk stuff
from pyvisi.renderers.vtk import *
# start a scene, using vtk as the renderer
scene = Scene()
scene.setBackgroundColor(1,1,1)
# load some vtk data
data = Data(scene)
data.load(file="Flinders_ranges.vtk")
# set up the plot
plot = MeshPlot(scene)
plot.setData(data)
# render the scene
scene.render(pause=True,interactive=True)
# the interactive flag means whether or not to set up and use
# a window interactor object (if available)
# put an exit in here so that we don't run the vtk code
sys.exit()
else:
# here is the original vtk code
import vtk
# set up the renderer and render window
_renderer = vtk.vtkRenderer()
_renderWindow = vtk.vtkRenderWindow()
_renderWindow.AddRenderer(_renderer)
_renderWindow.SetSize(640,480)
_renderer.SetBackground(1,1,1)
# load the vtk file
_dataReader = vtk.vtkDataSetReader()
#_dataReader.SetFileName("Flinders_ranges.vtk")
_dataReader.SetFileName("t.vtk")
# set up the data
_dataMapper = vtk.vtkDataSetMapper()
_dataMapper.SetInput(_dataReader.GetOutput())
    _dataMapper.ScalarVisibilityOff()  # colour by the actor's property, not by scalar data
# set up the actor for viewing it
_dataActor = vtk.vtkActor()
_dataActor.SetMapper(_dataMapper)
_dataActor.GetProperty().SetColor(0.2,0.2,0.2)
_dataActor.GetProperty().SetRepresentationToWireframe()
# add the actor to the scene
_renderer.AddActor(_dataActor)
# now see what was produced, with interactive playing stuff
_iRenderer = vtk.vtkRenderWindowInteractor()
_iRenderer.SetRenderWindow(_renderWindow)
_iRenderer.Initialize()
_renderWindow.Render()
_iRenderer.Start()
raw_input("Press enter to continue")
# vim: expandtab shiftwidth=4:
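# Porting note: VTK 6+ removed SetInput() from mappers in favour of the
# port-based pipeline; the equivalent modern wiring for the mapper above
# would be, e.g.:
#     _dataMapper.SetInputConnection(_dataReader.GetOutputPort())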
| paultcochrane/pyvisi | examples/vtkDataLoadExample.py | Python | gpl-2.0 | 2,972 | ["VTK"] | 7dafe0f244c6576690a2fc75a57299d95eef7def12df1ec0fefd3aa22efac938 |
"""
gts v0.01
genetic test sequencer
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
import traceback
import xmlrpclib
import json
import gene_server_config
import time
import sys
import random
import subprocess
import __main__
import paths
from genetic import *
from load_config import *
random.seed(time.time())
if __name__ == "__main__":
__appversion__ = "0.01a"
print "Genetic Test Sequencer v%s"%__appversion__
# connect to the xml server
#
__server__ = gene_server_config.__server__
__port__ = str(gene_server_config.__port__)
#make sure the port number matches the server.
server = xmlrpclib.Server('http://' + __server__ + ":" + __port__)
multicall = xmlrpclib.MultiCall(server)
print "gts: connected to gene_server ",__server__,":",__port__
    # The variable values below are superseded by the configuration loaded
    # from the configuration file global_config.json.
    # !!!!!!!! To change the values, edit the JSON configuration file, NOT the variables below !!!!!!!!
    max_length = 60 * 24 * 60
    load_throttle = 1  # go easy on CPU usage
    load_throttle_sleep_interval = 0.10  # seconds
    calibrate = 1  # set to 1 to adjust the population size to maintain a one-minute test cycle
    cycle_time = 60 * 1  # time in seconds to test the entire population
    min_cycle_time = 30
    cycle_time_step = 2
    pid_update_rate = 20  # reset watchdog after every n genes tested
enable_flash_crash_protection = False
flash_crash_protection_delay = 60 * 3 #three hours
trusted_keys_path = "./config/trusted_keys/"
config_loaded = 0
#!!!!!!!!!!!!!!!!end of loaded config values!!!!!!!!
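    # Sketch (an assumption -- the real implementation lives in load_config.py)
    # of what a loader like load_config_file_into_object() plausibly does:
    # copy each top-level key of a flat JSON file onto the target object.
    def _example_load_config(path, obj):
        import json
        with open(path) as f:
            for key, value in json.load(f).items():
                setattr(obj, key, value)
        return obj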
#define the module exit function
profile = False
def gts_exit(msg,pid=None):
global profile
if pid != None:
server.pid_msg(pid,msg)
server.pid_exit(pid)
if profile == True:
print "gts: profiler saving gts_call_graph.png to ./report/"
pycallgraph.make_dot_graph('./report/gts_call_graph.png')
print msg
sys.exit()
#load config
try:
__main__ = load_config_file_into_object('global_config.json',__main__)
except:
gts_exit("gts: error detected while loading the configuration. the application will now exit.")
else:
if config_loaded == False:
gts_exit("gts: configuration failed to load. the application will now exit.")
else:
print "gts: configuration loaded."
#internal variables
quartile_cycle = False
quartile = ''
bs = ''
verbose = False
run_once = False
get_config = False
get_default_config = False
score_only = False
profile = False
pid = None
g = genepool()
gd = "UNDEFINED"
if len(sys.argv) >= 3:
# Convert the two arguments from strings into numbers
quartile = sys.argv[1]
bs = sys.argv[2]
if len(sys.argv) > 3:
for i in range(3,len(sys.argv)):
if sys.argv[i] == 'v':
verbose = True
if sys.argv[i] == 'run_once':
                #use with gal.py to auto-reset (works around pypy memory leaks):
                #exit after the first local optimum is found, or, when 'all'
                #quartiles are being tested, reset after one cycle through the quartiles
run_once = True
if sys.argv[i] == 'get_default_config':
#if set the default gene_def config will be loaded from the server
get_default_config = True
get_config = True
if sys.argv[i] == 'get_config':
#if set the gene_def config will be randomly loaded from the server
get_config = True
if sys.argv[i] == 'score_only':
                    #if set, the client only retests existing genes and resubmits their scores
score_only = True
if sys.argv[i] == 'profile':
try:
import pycallgraph
except:
print "gts: pycallgraph module not installed. Profiling disabled."
else:
pycallgraph.start_trace()
profile = True
print "gts: running pycallgraph profiler"
if sys.argv[i] == 'pid':
#set the pid from the command line
try:
pid = sys.argv[i + 1]
except:
pass
if pid == None:
#if the pid is not set from the command line then
#use the genetic class object id
pid = g.id
#which quartile group to test
    while quartile not in ['1', '2', '3', '4', 'all']:
        print "Which quartile group to test? (1,2,3,4 or all):"
quartile = raw_input()
if quartile != 'all':
quartile = int(quartile)
else:
quartile = 1
quartile_cycle = True
    # retest existing gene scores only when running in score_only mode
    update_all_scores = score_only
#configure the gene pool
if get_config == True:
print "gts: Loading gene_def from the server."
while gd == "UNDEFINED" and get_config == True:
#get the gene def config list from the server
gdhl = json.loads(server.get_gene_def_hash_list())
if get_default_config == True:
gdh = json.loads(server.get_default_gene_def_hash())
gdhl = [gdh,gdh,gdh] #create a dummy list with the same (default) hash
if len(gdhl) < 2:
                #the default config isn't defined:
                #if fewer than two gene definitions are registered, fall back to the local config.
get_config = False
break
#pick one at random
gdh = random.choice(gdhl)
#get the gene_def
gd = server.get_gene_def(gdh)
#print gd
if gd != "UNDEFINED":
try:
gd = json.loads(gd)
#load the remote config
g = load_config_into_object(gd,g)
#only need to register the client with the existing gene_def hash
server.pid_register_client(pid,gdh)
print "gts: gene_def_hash:",gdh
print "gts: name",gd['name']
print "gts: description",gd['description']
print "gts: gene_def load complete."
except:
print "gts: gene_def load error:",gd
gd = "UNDEFINED"
get_config = False #force load local gen_def.json config
else:
time.sleep(5) #default config is undefined so just wait and try again....
#the script will remain in this loop until the default config is set
if get_config == False:
gd = load_config_from_file("gene_def.json")
g = load_config_into_object(gd,g)
#register the gene_def file and link to this client using the gene pool id as the PID (GUID)
f = open('./config/gene_def.json','r')
gdc = f.read()
f.close()
gdh = server.pid_register_gene_def(pid,gdc)
server.pid_register_client(pid,gdh)
#reset the process watchdog
server.pid_alive(pid)
#send a copy of the command line args
server.pid_msg(pid,str(sys.argv))
ff = None
if gd.has_key('fitness_script'):
        #check for an updated signed package on the gene_server
        #pypy probably won't have pycrypto installed - fall back to python in a subprocess to sync
        #fitness module names in the gene_def exclude the .py file extension,
        #but signed packages use the extension. check for the extension; if none exists, add .py
print "gts: synchronizing signed code"
if len(gd['fitness_script'].split('.')) == 1:
sync_filename = gd['fitness_script'] + '.py'
subprocess.call(('python','cpsu.py','get',sync_filename,trusted_keys_path))
print "gts: loading the fitness module",gd['fitness_script']
ff = __import__(gd['fitness_script'])
else:
print "gts: no fitness module defined, loading default (bct)"
ff = __import__('bct')
te = ff.trade_engine()
#apply global configs
te.max_length = max_length
te.enable_flash_crash_protection = enable_flash_crash_protection
te.flash_crash_protection_delay = flash_crash_protection_delay
#load the gene_def fitness_config, if available
if gd.has_key('fitness_config'):
te = load_config_into_object(gd['fitness_config'],te)
te.score_only = True
print "gts: initializing the fitness function"
te.initialize()
#bootstrap the population with the winners available from the gene_pool server
    while bs not in ('y', 'n'):
print "Bootstrap from the gene_server? (y/n)"
bs = raw_input()
if bs == 'y':
bob_simulator = True
g.local_optima_trigger = 10
bootstrap_bobs = json.loads(server.get_bobs(quartile,pid))
bootstrap_all = json.loads(server.get_all(quartile,pid))
        if isinstance(bootstrap_bobs, list) and isinstance(bootstrap_all, list):
g.seed()
if len(bootstrap_all) > 100:
g.pool = []
g.insert_genedict_list(bootstrap_bobs)
g.insert_genedict_list(bootstrap_all)
g.pool_size = len(g.pool)
if update_all_scores == True:
#reset the scores for retesting
g.reset_scores()
else:
#mate the genes before testing
g.next_gen()
        else:  # if no BOBs or high scores, seed a new population
print "gts: no BOBs or high scores available...seeding new pool."
g.seed()
print "gts: Update all scores:",update_all_scores
print "gts: %s BOBs loaded"%len(bootstrap_bobs)
print "gts: %s high scores loaded"%len(bootstrap_all)
print "gts: Pool size: %s"%len(g.pool)
else:
bob_simulator = False
#update_all_scores = False
g.local_optima_trigger = 5
print "gts: Seeding the initial population"
g.seed()
#the counters are all incremented at the same time but are reset by different events:
test_count = 0 #used to reset the pool after so many loop cycles
total_count = 0 #used to calculate overall performance
loop_count = 0 # used to trigger pool size calibration and data reload
max_score = -100000
max_score_id = -1
max_gene = None
multicall_count = 0
start_time = time.time()
print "gts: running the test sequencer"
while 1:
test_count += 1
total_count += 1
loop_count += 1
if load_throttle == 1:
time.sleep(load_throttle_sleep_interval)
if total_count%pid_update_rate == 0:
            #periodically reset the watchdog monitor
print "gts: resetting watchdog timer"
server.pid_alive(pid)
if loop_count > g.pool_size:
if score_only: #quartile_cycle == True and bob_simulator == True:
#force a state jump to load the next quartile to retest the genes
#in this mode the only function of the client is to cycle through the quartiles to retest existing genes
g.local_optima_reached = True
#update_all_scores = False #on the first pass only, bob clients need to resubmit updated scores for every gene
loop_count = 0
#reset the watchdog monitor
server.pid_alive(pid)
#benchmark the cycle speed
current_time = time.time()
elapsed_time = current_time - start_time
gps = total_count / elapsed_time
pid_update_rate = int(gps * 40)
if calibrate == 1:
print "gts: recalibrating pool size..."
g.pool_size = int(gps * cycle_time)
cycle_time -= cycle_time_step
if cycle_time < min_cycle_time:
cycle_time = min_cycle_time
if g.pool_size > 10000:
g.pool_size = 10000
kss = (gps*te.input_data_length)/1000.0
performance_metrics = "gts: ","%.2f"%gps,"G/S; ","%.2f"%kss,"KS/S;"," Pool Size: ",g.pool_size," Total Processed: ",total_count
performance_metrics = " ".join(map(str,performance_metrics))
print performance_metrics
pmd = {'channel':'gts_metric','gps':gps,'kss':kss,'pool':g.pool_size,'total':total_count}
server.pid_msg(pid,json.dumps(pmd))
if g.local_optima_reached:
test_count = 0
#initialize fitness function (load updated data)
te.initialize()
if score_only: #quartile_cycle == True and bob_simulator == True:
#jump to the next quartile and skip the bob submission
update_all_scores = True
quartile += 1
if quartile > 4:
quartile = 1
if run_once:
print "gts: flushing xmlrpc multicall buffer."
multicall() #send any batched calls to the server
print "gts: run once done."
gts_exit("gts: run once done.",pid)
elif max_gene != None:
#debug
print "gts: ",max_gene
#end debug
print "gts: submit BOB for id:%s to server (%.2f)"%(str(max_gene['id']),max_gene['score'])
server.put_bob(json.dumps(max_gene),quartile,pid)
if quartile_cycle == True:
#if cycling is enabled then
#the client will cycle through the quartiles as local optimas are found
#jump to the next quartile
quartile += 1
if quartile > 4:
quartile = 1
if run_once:
gts_exit("gts: run once done.",pid)
else:
if max_score > -1000:
print "gts: **WARNING** MAX_GENE is gone.: ID",max_score_id
print "*"*80
print "gts: GENE DUMP:"
for ag in g.pool:
print ag['id'],ag['score']
print "*"*80
gts_exit("gts: HALTED.",pid)
max_gene = None #clear the max gene
max_score = -100000 #reset the high score
if quartile_cycle == False and run_once:
print "gts: flushing xmlrpc multicall buffer."
multicall() #send any batched calls to the server
print "gts: run once done."
gts_exit("gts: run once done.",pid)
if bob_simulator:
#update_all_scores = True #on the first pass only, bob clients need to resubmit updated scores for every gene
bootstrap_bobs = json.loads(server.get_bobs(quartile,pid))
bootstrap_all = json.loads(server.get_all(quartile,pid))
g.pool_size = len(g.pool)
                if isinstance(bootstrap_bobs, list) and isinstance(bootstrap_all, list):
g.seed()
g.pool = []
g.insert_genedict_list(bootstrap_bobs)
g.insert_genedict_list(bootstrap_all)
if quartile_cycle == True:
#reset the scores for retesting
g.reset_scores()
else:
#mate the genes before testing
g.next_gen()
                else:  # if no BOBs or high scores, seed a new population
#print "no BOBs or high scores available...seeding new pool."
g.seed()
else:
g.seed()
        if test_count > (g.pool_size * 10):
            test_count = 0
            print "gts: resetting scores to force retest of winners..."
            #knock the high score down so it does not block the latest
            #scoring data, which may fall due to the latest price data
            max_score = 0
            g.next_gen()
            g.reset_scores()
#create/reset the trade engine
te.reset()
#get the next gene
ag = g.get_next()
#configure the trade engine
te = load_config_into_object({'set':ag},te)
#set the quartile to test
te.test_quartile(quartile)
#run the fitness function
try:
te.run()
except Exception, err:
#kill off any genes that crash the trade engine (div by 0 errors for instance)
print "gts: ***** GENE FAULT *****"
print Exception,err
print traceback.format_exc()
print "gts: ***** END GENE FAULT *****"
g.set_score(ag['id'],g.kill_score)
else:
#return the score to the gene pool
try:
score = te.score()
except Exception, err:
#kill off any genes that crash the trade engine (div by 0 errors for instance)
print "gts: ***** GENE SCORE FAULT *****"
print Exception,err
print traceback.format_exc()
print "gts: ***** END GENE SCORE FAULT *****"
g.set_score(ag['id'],g.kill_score)
else:
if verbose:
print "gts: ",ag['gene'],"\t".join(["%.5f"%max_score,"%.5f"%score,"%.3f"%g.prune_threshold])
g.set_score(ag['id'],score)
#g.set_message(ag['id'],"Balance: " + str(te.balance) +"; Wins: " + str(te.wins)+ "; Loss:" + str(te.loss) + "; Positions: " + str(len(te.positions)))
g.set_message(ag['id'],te.text_summary)
if score > 1000 and profile == True:
gts_exit("gts: profiling complete")
#if a new high score is found submit the gene to the server
if score > max_score and update_all_scores == False:
print "gts: submit high score for quartile:%s id:%s to server (%.5f)"%(str(quartile),str(ag['id']),score)
max_score = score
max_score_id = ag['id']
max_gene = ag.copy() #g.get_by_id(max_score_id)
if max_gene != None:
server.put(json.dumps(max_gene),quartile,pid)
else:
print "gts: MAX_GENE is None!!"
if update_all_scores == True:
print "gts: updating score for quartile:%s id:%s to server, multicall deffered (%.5f)"%(str(quartile),str(ag['id']),score)
agene = g.get_by_id(ag['id'])
if agene != None:
multicall_count += 1
multicall.mc_put(json.dumps(agene),quartile,pid)
if multicall_count > 40:
multicall_count = 0
print "gts: flushing xmlrpc multicall buffer."
multicall()
else:
print "gts: updating gene error: gene is missing!!"
| stahn/ga-bitbot | gts.py | Python | gpl-3.0 | 20,222 | ["Brian"] | c5106baff07f67ad833f3c246c19c2129b056bf1f965211601c82bb17ab9a668 |
"""
Acceptance tests for Studio's Setting pages
"""
from unittest import skip
from .base_studio_test import StudioCourseTest
from ...pages.studio.settings_certificates import CertificatesPage
class CertificatesTest(StudioCourseTest):
"""
Tests for settings/certificates Page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(CertificatesTest, self).setUp(is_staff=True)
self.certificates_page = CertificatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def make_signatory_data(self, prefix='First'):
"""
Makes signatory dict which can be used in the tests to create certificates
"""
return {
'name': '{prefix} Signatory Name'.format(prefix=prefix),
'title': '{prefix} Signatory Title'.format(prefix=prefix),
'organization': '{prefix} Signatory Organization'.format(prefix=prefix),
}
def create_and_verify_certificate(self, course_title_override, existing_certs, signatories):
"""
Creates a new certificate and verifies that it was properly created.
"""
self.assertEqual(existing_certs, len(self.certificates_page.certificates))
if existing_certs == 0:
self.certificates_page.wait_for_first_certificate_button()
self.certificates_page.click_first_certificate_button()
else:
self.certificates_page.wait_for_add_certificate_button()
self.certificates_page.click_add_certificate_button()
certificate = self.certificates_page.certificates[existing_certs]
# Set the certificate properties
certificate.course_title = course_title_override
# add signatories
added_signatories = 0
for idx, signatory in enumerate(signatories):
certificate.signatories[idx].name = signatory['name']
certificate.signatories[idx].title = signatory['title']
certificate.signatories[idx].organization = signatory['organization']
certificate.signatories[idx].upload_signature_image('Signature-{}.png'.format(idx))
added_signatories += 1
if len(signatories) > added_signatories:
certificate.click_add_signatory_button()
# Save the certificate
self.assertEqual(certificate.get_text('.action-primary'), "Create")
certificate.click_create_certificate_button()
self.assertIn(course_title_override, certificate.course_title)
return certificate
def test_no_certificates_by_default(self):
"""
        Scenario: Ensure that the message telling me to create a new certificate
        is shown when no certificates exist.
Given I have a course without certificates
When I go to the Certificates page in Studio
Then I see "You have not created any certificates yet." message
"""
self.certificates_page.visit()
self.assertTrue(self.certificates_page.no_certificates_message_shown)
self.assertIn(
"You have not created any certificates yet.",
self.certificates_page.no_certificates_message_text
)
    def test_can_create_and_edit_certificate(self):
"""
Scenario: Ensure that the certificates can be created and edited correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
        And I set the new course title override and signatory and click the button 'Create'
Then I see the new certificate is added and has correct data
When I edit the certificate
And I change the name and click the button 'Save'
Then I see the certificate is saved successfully and has the new name
"""
self.certificates_page.visit()
self.certificates_page.wait_for_first_certificate_button()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first'), self.make_signatory_data('second')]
)
# Edit the certificate
certificate.click_edit_certificate_button()
certificate.course_title = "Updated Course Title Override 2"
self.assertEqual(certificate.get_text('.action-primary'), "Save")
certificate.click_save_certificate_button()
self.assertIn("Updated Course Title Override 2", certificate.course_title)
    @skip("TODO: fix this, see SOL-1053")
def test_can_delete_certificate(self):
"""
        Scenario: Ensure that the user can delete a certificate.
Given I have a course with 1 certificate
And I go to the Certificates page
When I delete the Certificate with name "New Certificate"
Then I see that there is no certificate
When I refresh the page
Then I see that the certificate has been deleted
"""
self.certificates_page.visit()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first'), self.make_signatory_data('second')]
)
certificate.wait_for_certificate_delete_button()
self.assertEqual(len(self.certificates_page.certificates), 1)
# Delete the certificate we just created
certificate.click_delete_certificate_button()
self.certificates_page.click_confirmation_prompt_primary_button()
# Reload the page and confirm there are no certificates
self.certificates_page.visit()
self.assertEqual(len(self.certificates_page.certificates), 0)
    def test_can_create_and_edit_signatories_of_certificate(self):
"""
Scenario: Ensure that the certificates can be created with signatories and edited correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
        And I set the new course title override and signatory and click the button 'Create'
Then I see the new certificate is added and has one signatory inside it
When I click 'Edit' button of signatory panel
And I set the name and click the button 'Save' icon
Then I see the signatory name updated with newly set name
When I refresh the certificates page
Then I can see course has one certificate with new signatory name
When I click 'Edit' button of signatory panel
And click on 'Close' button
Then I can see no change in signatory detail
"""
self.certificates_page.visit()
certificate = self.create_and_verify_certificate(
"Course Title Override",
0,
[self.make_signatory_data('first')]
)
self.assertEqual(len(self.certificates_page.certificates), 1)
# Edit the signatory in certificate
signatory = certificate.signatories[0]
signatory.edit()
signatory.name = 'Updated signatory name'
signatory.title = 'Update signatory title'
signatory.organization = 'Updated signatory organization'
signatory.save()
self.assertEqual(len(self.certificates_page.certificates), 1)
        # Refresh the page so it picks up the updated certificate object.
self.certificates_page.refresh()
signatory = self.certificates_page.certificates[0].signatories[0]
self.assertIn("Updated signatory name", signatory.name)
self.assertIn("Update signatory title", signatory.title)
self.assertIn("Updated signatory organization", signatory.organization)
signatory.edit()
signatory.close()
self.assertIn("Updated signatory name", signatory.name)
def test_can_cancel_creation_of_certificate(self):
"""
Scenario: Ensure that creation of a certificate can be canceled correctly.
Given I have a course without certificates
When I click button 'Add your first Certificate'
And I set name of certificate and click the button 'Cancel'
Then I see that there is no certificates in the course
"""
self.certificates_page.visit()
self.certificates_page.click_first_certificate_button()
certificate = self.certificates_page.certificates[0]
certificate.course_title = "Title Override"
certificate.click_cancel_edit_certificate()
self.assertEqual(len(self.certificates_page.certificates), 0)
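# The CertificatesPage used above is a bok-choy page object; a minimal sketch
# of that pattern for reference (the class name, CSS selectors and methods
# below are illustrative assumptions, not edx's real implementation):
from bok_choy.page_object import PageObject

class ExampleSettingsPage(PageObject):
    url = None  # the real page builds its URL from org/number/run

    def is_browser_on_page(self):
        return self.q(css='.view-settings').present

    def click_primary_button(self):
        self.q(css='.action-primary').first.click()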
| appliedx/edx-platform | common/test/acceptance/tests/studio/test_studio_settings_certificates.py | Python | agpl-3.0 | 8,564 | ["VisIt"] | c28686c8e9df4523e980f8d41a22202381080f492f590cdc313769dc528eea34 |
#!/usr/bin/env python
from __future__ import division
from setuptools import setup
import os
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2011, The tax2tree project"
__credits__ = ["Daniel McDonald"]
__license__ = "BSD"
__version__ = "1.0-dev"
__maintainer__ = "Daniel McDonald"
__email__ = "mcdonadt@colorado.edu"
__status__ = "Development"
classes = """
Development Status :: 4 - Beta
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
long_description = """The tax2tree project"""
# If readthedocs.org is building the project, we're not able to build the
# required numpy/scipy versions on their machines (nor do we want to, as that
# would take a long time). To build the docs, we don't need the latest versions
# of these dependencies anyway, so we use whatever is in their system's
# site-packages to make tax2tree importable. See doc/rtd-requirements.txt for
# dependencies that RTD must install in order to build our docs.
#
# Code to check whether RTD is building our project is taken from
# http://read-the-docs.readthedocs.org/en/latest/faq.html
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
install_requires = []
else:
install_requires = ['numpy >= 1.7', 'future>=0.13.1', 'scikit-bio',
'Click', 'iow']
setup(name='tax2tree',
version=__version__,
description='Taxonomy to tree decoration tools',
author=__maintainer__,
author_email=__email__,
maintainer=__maintainer__,
maintainer_email=__email__,
url='https://github.com/biocore/tax2tree',
packages=['t2t'],
scripts=['scripts/t2t'],
install_requires=install_requires,
extras_require={'test': ['nose >= 0.10.1', 'pep8'],
'doc': ['Sphinx >= 1.2.2']},
long_description=long_description)
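# Usage note (standard setuptools/pip behaviour, nothing project-specific):
# the extras_require entries above install as, e.g.,
#   pip install .            # core install_requires only
#   pip install '.[test]'    # additionally pulls in nose and pep8
#   pip install '.[doc]'     # additionally pulls in Sphinx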
| biocore/tax2tree | setup.py | Python | bsd-3-clause | 2,268 | ["scikit-bio"] | 66d3e418096241de44b4fdfdcd7dee24d6bc9060cbe8341e0ebf773f821f2d56 |
import discord
from discord.ext import commands
from __main__ import send_cmd_help
from bs4 import BeautifulSoup
import random
class sfw:
"""sfw commands."""
def __init__(self, bot):
self.bot = bot
self.session = self.bot.http.session
@commands.group(pass_context=True)
async def sfw(self, ctx):
"""sfw Commands"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@sfw.command(no_pm=True)
async def yandere(self):
"""Random Image From Yandere"""
try:
query = ("https://yande.re/post/random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="highres").get("href")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def konachan(self):
"""Random Image From Konachan"""
try:
query = ("https://konachan.com/post/random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="highres").get("href")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def e621(self):
"""Random Image From e621"""
try:
query = ("https://e621.net/post/random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="highres").get("href")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def rule34(self):
"""Random Image From rule34"""
try:
query = ("http://rule34.xxx/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say('http:' + image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def danbooru(self):
"""Random Image From Danbooru"""
try:
query = ("http://danbooru.donmai.us/posts/random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say('http://danbooru.donmai.us' + image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def gelbooru(self):
"""Random Image From Gelbooru"""
try:
query = ("http://www.gelbooru.com/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def tbib(self):
"""Random Image From DrunkenPumken"""
try:
query = ("http://www.tbib.org/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say("http:" + image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def xbooru(self):
"""Random Image From Xbooru"""
try:
query = ("http://xbooru.com/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def furrybooru(self):
"""Random Image From Furrybooru"""
try:
query = ("http://furry.booru.org/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def drunkenpumken(self):
"""Random Image From DrunkenPumken"""
try:
query = ("http://drunkenpumken.booru.org/index.php?page=post&s=random")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(no_pm=True)
async def lolibooru(self):
"""Random Image From Lolibooru"""
try:
query = ("https://lolibooru.moe/post/random/")
page = await self.session.get(query)
page = await page.text()
soup = BeautifulSoup(page, 'html.parser')
image = soup.find(id="image").get("src")
image = image.replace(' ','%20')
await self.bot.say(image)
except Exception as e:
await self.bot.say(":x: **Error:** `{}`".format(e))
@sfw.command(pass_context=True, no_pm=True)
async def ysearch(self, ctx, *tags: str):
"""Search Yandere With A Tag"""
        if not tags:
            await self.bot.say(":warning: Tags are missing.")
        else:
            try:
                tags = "+".join(tags)
                query = "https://yande.re/post.json?limit=42&tags=" + tags
page = await self.session.get(query)
json = await page.json()
if json != []:
await self.bot.say(random.choice(json)['jpeg_url'])
else:
await self.bot.say(":warning: Yande.re has no images for requested tags.")
except Exception as e:
await self.bot.say(":x: `{}`".format(e))
def setup(bot):
n = sfw(bot)
bot.add_cog(n)
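# The commands above repeat a single scrape-and-post pattern; a sketch of a
# shared helper (hypothetical name and signature, using the same aiohttp,
# BeautifulSoup and discord.py 0.x calls as the cog itself):
async def _random_image(cog, url, element_id, attr='src', prefix=''):
    try:
        page = await cog.session.get(url)
        soup = BeautifulSoup(await page.text(), 'html.parser')
        await cog.bot.say(prefix + soup.find(id=element_id).get(attr))
    except Exception as e:
        await cog.bot.say(":x: **Error:** `{}`".format(e))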
| Vidyapoky/keksimus | nsfw/sfw.py | Python | mit | 6,946 | ["MOE"] | 154554c21d90e78191897e0b7dc3da9c3bbe9365d50d9a1b5ffe84fb1c1129fb |
"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is bounded by the
fact that `p` defines an eps-embedding with high probability, as
defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` drastically reduces the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positive)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import pylab as pl
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = pl.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
pl.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
pl.loglog(n_samples_range, min_n_components, color=color)
pl.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
pl.xlabel("Number of observations to eps-embed")
pl.ylabel("Minimum number of dimensions")
pl.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
pl.show()
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = pl.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
pl.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
pl.semilogy(eps_range, min_n_components, color=color)
pl.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
pl.xlabel("Distortion eps")
pl.ylabel("Minimum number of dimensions")
pl.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
pl.show()
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
pl.figure()
pl.hexbin(dists, projected_dists, gridsize=100)
pl.xlabel("Pairwise squared distances in original space")
pl.ylabel("Pairwise squared distances in projected space")
pl.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = pl.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
pl.figure()
pl.hist(rates, bins=50, normed=True, range=(0., 2.))
pl.xlabel("Squared distances rate: projected / original")
pl.ylabel("Distribution of samples pairs")
pl.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
pl.show()
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
| JT5D/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | Python | bsd-3-clause | 7,418 | ["Gaussian"] | b4ed74d7eeb697e83f76d48f7f29e51fe37f69e9e10c31ee624e0e84fde4f1b7 |